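/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * NUMA policy allows the user to give hints about the node(s) from which
 * memory should be allocated.
 *
 * Supported policies:
 * default     - allocate on the local node, subject to the current cpuset
 * preferred   - try a specific node first, then fall back to other nodes
 * bind        - allocate only from a given set of nodes
 * interleave  - spread allocations across a set of nodes
 *
 * A VMA policy, when present, takes precedence over the process policy.
 */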
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"
99
100
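/* Internal flags used by do_mbind() and check_range() */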
101#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)
102#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)
103
104static struct kmem_cache *policy_cache;
105static struct kmem_cache *sn_cache;
106
107
108
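/*
 * Highest zone to which memory policies apply: MPOL_BIND nodemask filtering
 * is only done for allocations from this zone or above (see
 * is_valid_nodemask() and policy_nodemask()).
 */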
109enum zone_type policy_zone = 0;
110
111
112
113
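/*
 * Run-time system-wide default policy: local allocation, expressed as
 * MPOL_PREFERRED with the MPOL_F_LOCAL flag.
 */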
114static struct mempolicy default_policy = {
115 .refcnt = ATOMIC_INIT(1),
116 .mode = MPOL_PREFERRED,
117 .flags = MPOL_F_LOCAL,
118};
119
120static const struct mempolicy_operations {
121 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
137 enum mpol_rebind_step step);
138} mpol_ops[MPOL_MAX];
139
140
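/* Check that the nodemask contains at least one node with populated memory */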
141static int is_valid_nodemask(const nodemask_t *nodemask)
142{
143 int nd, k;
144
145 for_each_node_mask(nd, *nodemask) {
146 struct zone *z;
147
148 for (k = 0; k <= policy_zone; k++) {
149 z = &NODE_DATA(nd)->node_zones[k];
150 if (z->present_pages > 0)
151 return 1;
152 }
153 }
154
155 return 0;
156}
157
158static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
159{
160 return pol->flags & MPOL_MODE_FLAGS;
161}
162
163static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
164 const nodemask_t *rel)
165{
166 nodemask_t tmp;
167 nodes_fold(tmp, *orig, nodes_weight(*rel));
168 nodes_onto(*ret, tmp, *rel);
169}
170
171static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
172{
173 if (nodes_empty(*nodes))
174 return -EINVAL;
175 pol->v.nodes = *nodes;
176 return 0;
177}
178
179static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
180{
181 if (!nodes)
182 pol->flags |= MPOL_F_LOCAL;
183 else if (nodes_empty(*nodes))
184 return -EINVAL;
185 else
186 pol->v.preferred_node = first_node(*nodes);
187 return 0;
188}
189
190static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
191{
192 if (!is_valid_nodemask(nodes))
193 return -EINVAL;
194 pol->v.nodes = *nodes;
195 return 0;
196}
197
198
199
200
201
202
203
204
205
206
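/*
 * mpol_set_nodemask() is called after mpol_new() to set up the policy's
 * nodemask, if any.  The requested nodes are restricted to (or, for
 * MPOL_F_RELATIVE_NODES, remapped onto) the intersection of the current
 * cpuset's mems_allowed and the nodes with memory.
 *
 * Callers hold the task's alloc_lock (task_lock) so that
 * cpuset_current_mems_allowed is stable.
 */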
207static int mpol_set_nodemask(struct mempolicy *pol,
208 const nodemask_t *nodes, struct nodemask_scratch *nsc)
209{
210 int ret;
211
212
213 if (pol == NULL)
214 return 0;
215
216 nodes_and(nsc->mask1,
217 cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
218
219 VM_BUG_ON(!nodes);
220 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
221 nodes = NULL;
222 else {
223 if (pol->flags & MPOL_F_RELATIVE_NODES)
224 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
225 else
226 nodes_and(nsc->mask2, *nodes, nsc->mask1);
227
228 if (mpol_store_user_nodemask(pol))
229 pol->w.user_nodemask = *nodes;
230 else
231 pol->w.cpuset_mems_allowed =
232 cpuset_current_mems_allowed;
233 }
234
235 if (nodes)
236 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
237 else
238 ret = mpol_ops[pol->mode].create(pol, NULL);
239 return ret;
240}
241
242
243
244
245
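/*
 * mpol_new() allocates a new policy and does basic validation and
 * initialization of @mode and @flags; the nodemask is applied later via
 * mpol_set_nodemask().  Returns NULL for MPOL_DEFAULT or an ERR_PTR()
 * on error.
 */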
246static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
247 nodemask_t *nodes)
248{
249 struct mempolicy *policy;
250
251 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
252 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
253
254 if (mode == MPOL_DEFAULT) {
255 if (nodes && !nodes_empty(*nodes))
256 return ERR_PTR(-EINVAL);
257 return NULL;
258 }
259 VM_BUG_ON(!nodes);
260
261
262
263
264
265
266 if (mode == MPOL_PREFERRED) {
267 if (nodes_empty(*nodes)) {
268 if (((flags & MPOL_F_STATIC_NODES) ||
269 (flags & MPOL_F_RELATIVE_NODES)))
270 return ERR_PTR(-EINVAL);
271 }
272 } else if (nodes_empty(*nodes))
273 return ERR_PTR(-EINVAL);
274 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275 if (!policy)
276 return ERR_PTR(-ENOMEM);
277 atomic_set(&policy->refcnt, 1);
278 policy->mode = mode;
279 policy->flags = flags;
280
281 return policy;
282}
283
284
285void __mpol_put(struct mempolicy *p)
286{
287 if (!atomic_dec_and_test(&p->refcnt))
288 return;
289 kmem_cache_free(policy_cache, p);
290}
291
292static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
293 enum mpol_rebind_step step)
294{
295}
296
297
298
299
300
301
302
303static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
304 enum mpol_rebind_step step)
305{
306 nodemask_t tmp;
307
308 if (pol->flags & MPOL_F_STATIC_NODES)
309 nodes_and(tmp, pol->w.user_nodemask, *nodes);
310 else if (pol->flags & MPOL_F_RELATIVE_NODES)
311 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
312 else {
313
314
315
316
317 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
318 nodes_remap(tmp, pol->v.nodes,
319 pol->w.cpuset_mems_allowed, *nodes);
320 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
321 } else if (step == MPOL_REBIND_STEP2) {
322 tmp = pol->w.cpuset_mems_allowed;
323 pol->w.cpuset_mems_allowed = *nodes;
324 } else
325 BUG();
326 }
327
328 if (nodes_empty(tmp))
329 tmp = *nodes;
330
331 if (step == MPOL_REBIND_STEP1)
332 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
333 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
334 pol->v.nodes = tmp;
335 else
336 BUG();
337
338 if (!node_isset(current->il_next, tmp)) {
339 current->il_next = next_node(current->il_next, tmp);
340 if (current->il_next >= MAX_NUMNODES)
341 current->il_next = first_node(tmp);
342 if (current->il_next >= MAX_NUMNODES)
343 current->il_next = numa_node_id();
344 }
345}
346
347static void mpol_rebind_preferred(struct mempolicy *pol,
348 const nodemask_t *nodes,
349 enum mpol_rebind_step step)
350{
351 nodemask_t tmp;
352
353 if (pol->flags & MPOL_F_STATIC_NODES) {
354 int node = first_node(pol->w.user_nodemask);
355
356 if (node_isset(node, *nodes)) {
357 pol->v.preferred_node = node;
358 pol->flags &= ~MPOL_F_LOCAL;
359 } else
360 pol->flags |= MPOL_F_LOCAL;
361 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
362 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
363 pol->v.preferred_node = first_node(tmp);
364 } else if (!(pol->flags & MPOL_F_LOCAL)) {
365 pol->v.preferred_node = node_remap(pol->v.preferred_node,
366 pol->w.cpuset_mems_allowed,
367 *nodes);
368 pol->w.cpuset_mems_allowed = *nodes;
369 }
370}
371
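/*
 * mpol_rebind_policy - migrate a policy to a different set of nodes
 *
 * Rebinding is done either at once (MPOL_REBIND_ONCE) or in two steps:
 * MPOL_REBIND_STEP1 adds the newly allowed nodes and sets MPOL_F_REBINDING,
 * MPOL_REBIND_STEP2 then drops the no-longer-allowed nodes and clears the
 * flag.  The two-step form avoids a window in which the policy would be
 * left with no nodes to allocate from.
 */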
388static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
389 enum mpol_rebind_step step)
390{
391 if (!pol)
392 return;
 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
     nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
395 return;
396
397 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
398 return;
399
400 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
401 BUG();
402
403 if (step == MPOL_REBIND_STEP1)
404 pol->flags |= MPOL_F_REBINDING;
405 else if (step == MPOL_REBIND_STEP2)
406 pol->flags &= ~MPOL_F_REBINDING;
407 else if (step >= MPOL_REBIND_NSTEP)
408 BUG();
409
410 mpol_ops[pol->mode].rebind(pol, newmask, step);
411}
412
413
414
415
416
417
418
419
420void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
421 enum mpol_rebind_step step)
422{
423 mpol_rebind_policy(tsk->mempolicy, new, step);
424}
425
426
427
428
429
430
431
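/*
 * Rebind each vma in mm to the new nodemask.
 * Call while holding a reference to mm; takes mm->mmap_sem for write.
 */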
432void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
433{
434 struct vm_area_struct *vma;
435
436 down_write(&mm->mmap_sem);
437 for (vma = mm->mmap; vma; vma = vma->vm_next)
438 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
439 up_write(&mm->mmap_sem);
440}
441
442static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
443 [MPOL_DEFAULT] = {
444 .rebind = mpol_rebind_default,
445 },
446 [MPOL_INTERLEAVE] = {
447 .create = mpol_new_interleave,
448 .rebind = mpol_rebind_nodemask,
449 },
450 [MPOL_PREFERRED] = {
451 .create = mpol_new_preferred,
452 .rebind = mpol_rebind_preferred,
453 },
454 [MPOL_BIND] = {
455 .create = mpol_new_bind,
456 .rebind = mpol_rebind_nodemask,
457 },
458};
459
460static void migrate_page_add(struct page *page, struct list_head *pagelist,
461 unsigned long flags);
462
463
464static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
465 unsigned long addr, unsigned long end,
466 const nodemask_t *nodes, unsigned long flags,
467 void *private)
468{
469 pte_t *orig_pte;
470 pte_t *pte;
471 spinlock_t *ptl;
472
473 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
474 do {
475 struct page *page;
476 int nid;
477
478 if (!pte_present(*pte))
479 continue;
480 page = vm_normal_page(vma, addr, *pte);
481 if (!page)
482 continue;
483
484
485
486
487
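 /*
  * vm_normal_page() filters out zero pages, but there may still be
  * reserved pages (e.g. in a VDSO) to skip, and KSM pages cannot be
  * migrated safely, so skip those as well.
  */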
488 if (PageReserved(page) || PageKsm(page))
489 continue;
490 nid = page_to_nid(page);
491 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
492 continue;
493
494 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
495 migrate_page_add(page, private, flags);
496 else
497 break;
498 } while (pte++, addr += PAGE_SIZE, addr != end);
499 pte_unmap_unlock(orig_pte, ptl);
500 return addr != end;
501}
502
503static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
504 unsigned long addr, unsigned long end,
505 const nodemask_t *nodes, unsigned long flags,
506 void *private)
507{
508 pmd_t *pmd;
509 unsigned long next;
510
511 pmd = pmd_offset(pud, addr);
512 do {
513 next = pmd_addr_end(addr, end);
514 split_huge_page_pmd(vma->vm_mm, pmd);
515 if (pmd_none_or_clear_bad(pmd))
516 continue;
517 if (check_pte_range(vma, pmd, addr, next, nodes,
518 flags, private))
519 return -EIO;
520 } while (pmd++, addr = next, addr != end);
521 return 0;
522}
523
524static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
525 unsigned long addr, unsigned long end,
526 const nodemask_t *nodes, unsigned long flags,
527 void *private)
528{
529 pud_t *pud;
530 unsigned long next;
531
532 pud = pud_offset(pgd, addr);
533 do {
534 next = pud_addr_end(addr, end);
535 if (pud_none_or_clear_bad(pud))
536 continue;
537 if (check_pmd_range(vma, pud, addr, next, nodes,
538 flags, private))
539 return -EIO;
540 } while (pud++, addr = next, addr != end);
541 return 0;
542}
543
544static inline int check_pgd_range(struct vm_area_struct *vma,
545 unsigned long addr, unsigned long end,
546 const nodemask_t *nodes, unsigned long flags,
547 void *private)
548{
549 pgd_t *pgd;
550 unsigned long next;
551
552 pgd = pgd_offset(vma->vm_mm, addr);
553 do {
554 next = pgd_addr_end(addr, end);
555 if (pgd_none_or_clear_bad(pgd))
556 continue;
557 if (check_pud_range(vma, pgd, addr, next, nodes,
558 flags, private))
559 return -EIO;
560 } while (pgd++, addr = next, addr != end);
561 return 0;
562}
563
564
565
566
567
568
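/*
 * check_range() walks [start, end) and verifies that the pages are on the
 * allowed set of nodes.  With MPOL_MF_MOVE/MPOL_MF_MOVE_ALL, misplaced
 * pages are isolated onto the @private pagelist for later migration.
 * Returns the first vma in the range, or an ERR_PTR() on error.
 */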
569static struct vm_area_struct *
570check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
571 const nodemask_t *nodes, unsigned long flags, void *private)
572{
573 int err;
574 struct vm_area_struct *first, *vma, *prev;
575
576
577 first = find_vma(mm, start);
578 if (!first)
579 return ERR_PTR(-EFAULT);
580 prev = NULL;
581 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
582 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
583 if (!vma->vm_next && vma->vm_end < end)
584 return ERR_PTR(-EFAULT);
585 if (prev && prev->vm_end < vma->vm_start)
586 return ERR_PTR(-EFAULT);
587 }
588 if (!is_vm_hugetlb_page(vma) &&
589 ((flags & MPOL_MF_STRICT) ||
590 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
591 vma_migratable(vma)))) {
592 unsigned long endvma = vma->vm_end;
593
594 if (endvma > end)
595 endvma = end;
596 if (vma->vm_start > start)
597 start = vma->vm_start;
598 err = check_pgd_range(vma, start, endvma, nodes,
599 flags, private);
600 if (err) {
601 first = ERR_PTR(err);
602 break;
603 }
604 }
605 prev = vma;
606 }
607 return first;
608}
609
610
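/* Apply the policy to a single VMA */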
611static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
612{
613 int err = 0;
614 struct mempolicy *old = vma->vm_policy;
615
616 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
617 vma->vm_start, vma->vm_end, vma->vm_pgoff,
618 vma->vm_ops, vma->vm_file,
619 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
620
621 if (vma->vm_ops && vma->vm_ops->set_policy)
622 err = vma->vm_ops->set_policy(vma, new);
623 if (!err) {
624 mpol_get(new);
625 vma->vm_policy = new;
626 mpol_put(old);
627 }
628 return err;
629}
630
631
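/*
 * mbind_range() applies @new_pol to every vma overlapping [start, end),
 * merging or splitting vmas as needed so that policy boundaries coincide
 * with vma boundaries.
 */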
632static int mbind_range(struct mm_struct *mm, unsigned long start,
633 unsigned long end, struct mempolicy *new_pol)
634{
635 struct vm_area_struct *next;
636 struct vm_area_struct *prev;
637 struct vm_area_struct *vma;
638 int err = 0;
639 pgoff_t pgoff;
640 unsigned long vmstart;
641 unsigned long vmend;
642
643 vma = find_vma(mm, start);
644 if (!vma || vma->vm_start > start)
645 return -EFAULT;
646
647 prev = vma->vm_prev;
648 if (start > vma->vm_start)
649 prev = vma;
650
651 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
652 next = vma->vm_next;
653 vmstart = max(start, vma->vm_start);
654 vmend = min(end, vma->vm_end);
655
656 if (mpol_equal(vma_policy(vma), new_pol))
657 continue;
658
659 pgoff = vma->vm_pgoff +
660 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
661 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
662 vma->anon_vma, vma->vm_file, pgoff,
663 new_pol);
664 if (prev) {
665 vma = prev;
666 next = vma->vm_next;
667 continue;
668 }
669 if (vma->vm_start != vmstart) {
670 err = split_vma(vma->vm_mm, vma, vmstart, 1);
671 if (err)
672 goto out;
673 }
674 if (vma->vm_end != vmend) {
675 err = split_vma(vma->vm_mm, vma, vmend, 0);
676 if (err)
677 goto out;
678 }
679 err = policy_vma(vma, new_pol);
680 if (err)
681 goto out;
682 }
683
684 out:
685 return err;
686}
687
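/*
 * Update the task's PF_MEMPOLICY flag: set iff the task has a non-default
 * mempolicy, so hot allocation paths can test a task flag instead of
 * dereferencing the policy.  Outside this file it should only be called on
 * a newly forked child that is not yet visible on the task list; for the
 * current task the mpol_set_task_struct_flag() wrapper below is used.
 */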
705void mpol_fix_fork_child_flag(struct task_struct *p)
706{
707 if (p->mempolicy)
708 p->flags |= PF_MEMPOLICY;
709 else
710 p->flags &= ~PF_MEMPOLICY;
711}
712
713static void mpol_set_task_struct_flag(void)
714{
715 mpol_fix_fork_child_flag(current);
716}
717
718
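/* Set the process memory policy */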
719static long do_set_mempolicy(unsigned short mode, unsigned short flags,
720 nodemask_t *nodes)
721{
722 struct mempolicy *new, *old;
723 struct mm_struct *mm = current->mm;
724 NODEMASK_SCRATCH(scratch);
725 int ret;
726
727 if (!scratch)
728 return -ENOMEM;
729
730 new = mpol_new(mode, flags, nodes);
731 if (IS_ERR(new)) {
732 ret = PTR_ERR(new);
733 goto out;
734 }
735
736
737
738
739
740
741 if (mm)
742 down_write(&mm->mmap_sem);
743 task_lock(current);
744 ret = mpol_set_nodemask(new, nodes, scratch);
745 if (ret) {
746 task_unlock(current);
747 if (mm)
748 up_write(&mm->mmap_sem);
749 mpol_put(new);
750 goto out;
751 }
752 old = current->mempolicy;
753 current->mempolicy = new;
754 mpol_set_task_struct_flag();
755 if (new && new->mode == MPOL_INTERLEAVE &&
756 nodes_weight(new->v.nodes))
757 current->il_next = first_node(new->v.nodes);
758 task_unlock(current);
759 if (mm)
760 up_write(&mm->mmap_sem);
761
762 mpol_put(old);
763 ret = 0;
764out:
765 NODEMASK_SCRATCH_FREE(scratch);
766 return ret;
767}
768
769
770
771
772
773
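/*
 * Return the nodemask for a policy, for a get_mempolicy() query.
 * Called with the task's alloc_lock held.
 */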
774static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
775{
776 nodes_clear(*nodes);
777 if (p == &default_policy)
778 return;
779
780 switch (p->mode) {
781 case MPOL_BIND:
782
783 case MPOL_INTERLEAVE:
784 *nodes = p->v.nodes;
785 break;
786 case MPOL_PREFERRED:
787 if (!(p->flags & MPOL_F_LOCAL))
788 node_set(p->v.preferred_node, *nodes);
789
790 break;
791 default:
792 BUG();
793 }
794}
795
796static int lookup_node(struct mm_struct *mm, unsigned long addr)
797{
798 struct page *p;
799 int err;
800
801 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
802 if (err >= 0) {
803 err = page_to_nid(p);
804 put_page(p);
805 }
806 return err;
807}
808
809
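/* Retrieve the NUMA policy of the current task, or of a given address */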
810static long do_get_mempolicy(int *policy, nodemask_t *nmask,
811 unsigned long addr, unsigned long flags)
812{
813 int err;
814 struct mm_struct *mm = current->mm;
815 struct vm_area_struct *vma = NULL;
816 struct mempolicy *pol = current->mempolicy;
817
818 if (flags &
819 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
820 return -EINVAL;
821
822 if (flags & MPOL_F_MEMS_ALLOWED) {
823 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
824 return -EINVAL;
825 *policy = 0;
826 task_lock(current);
827 *nmask = cpuset_current_mems_allowed;
828 task_unlock(current);
829 return 0;
830 }
831
832 if (flags & MPOL_F_ADDR) {
833
834
835
836
837
838 down_read(&mm->mmap_sem);
839 vma = find_vma_intersection(mm, addr, addr+1);
840 if (!vma) {
841 up_read(&mm->mmap_sem);
842 return -EFAULT;
843 }
844 if (vma->vm_ops && vma->vm_ops->get_policy)
845 pol = vma->vm_ops->get_policy(vma, addr);
846 else
847 pol = vma->vm_policy;
848 } else if (addr)
849 return -EINVAL;
850
851 if (!pol)
852 pol = &default_policy;
853
854 if (flags & MPOL_F_NODE) {
855 if (flags & MPOL_F_ADDR) {
856 err = lookup_node(mm, addr);
857 if (err < 0)
858 goto out;
859 *policy = err;
860 } else if (pol == current->mempolicy &&
861 pol->mode == MPOL_INTERLEAVE) {
862 *policy = current->il_next;
863 } else {
864 err = -EINVAL;
865 goto out;
866 }
867 } else {
868 *policy = pol == &default_policy ? MPOL_DEFAULT :
869 pol->mode;
870
871
872
873
874 *policy |= (pol->flags & MPOL_MODE_FLAGS);
875 }
876
877 if (vma) {
 up_read(&current->mm->mmap_sem);
879 vma = NULL;
880 }
881
882 err = 0;
883 if (nmask) {
884 if (mpol_store_user_nodemask(pol)) {
885 *nmask = pol->w.user_nodemask;
886 } else {
887 task_lock(current);
888 get_policy_nodemask(pol, nmask);
889 task_unlock(current);
890 }
891 }
892
893 out:
894 mpol_cond_put(pol);
895 if (vma)
 up_read(&current->mm->mmap_sem);
897 return err;
898}
899
900#ifdef CONFIG_MIGRATION
901
902
903
904static void migrate_page_add(struct page *page, struct list_head *pagelist,
905 unsigned long flags)
906{
907
908
909
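 /*
  * Only isolate pages mapped by a single process, unless MPOL_MF_MOVE_ALL
  * permits moving shared pages as well.
  */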
910 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
911 if (!isolate_lru_page(page)) {
912 list_add_tail(&page->lru, pagelist);
913 inc_zone_page_state(page, NR_ISOLATED_ANON +
914 page_is_file_cache(page));
915 }
916 }
917}
918
919static struct page *new_node_page(struct page *page, unsigned long node, int **x)
920{
921 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
922}
923
924
925
926
927
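/*
 * Migrate pages from one node to a target node.
 * Returns an error or the number of pages not migrated.
 */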
928static int migrate_to_node(struct mm_struct *mm, int source, int dest,
929 int flags)
930{
931 nodemask_t nmask;
932 LIST_HEAD(pagelist);
933 int err = 0;
934 struct vm_area_struct *vma;
935
936 nodes_clear(nmask);
937 node_set(source, nmask);
938
939 vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
940 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
941 if (IS_ERR(vma))
942 return PTR_ERR(vma);
943
944 if (!list_empty(&pagelist)) {
945 err = migrate_pages(&pagelist, new_node_page, dest,
946 false, MIGRATE_SYNC);
947 if (err)
948 putback_lru_pages(&pagelist);
949 }
950
951 return err;
952}
953
954
955
956
957
958
959
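/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.  Returns a negative error or the number of
 * pages that could not be moved.
 */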
960int do_migrate_pages(struct mm_struct *mm,
961 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
962{
963 int busy = 0;
964 int err;
965 nodemask_t tmp;
966
967 err = migrate_prep();
968 if (err)
969 return err;
970
971 down_read(&mm->mmap_sem);
972
973 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
974 if (err)
975 goto out;
976
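 /*
  * Repeatedly pick a <source, dest> pair to migrate between, mapping each
  * remaining source node through node_remap(from_nodes -> to_nodes).
  * Prefer a pair whose destination is not itself still a pending source
  * (i.e. not set in tmp), so pages are not piled onto a node that is about
  * to be drained.  Each chosen source is cleared from tmp; the loop stops
  * when every remaining node maps to itself or tmp is empty.
  */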
1008 tmp = *from_nodes;
1009 while (!nodes_empty(tmp)) {
1010 int s,d;
1011 int source = -1;
1012 int dest = 0;
1013
1014 for_each_node_mask(s, tmp) {
1015 d = node_remap(s, *from_nodes, *to_nodes);
1016 if (s == d)
1017 continue;
1018
1019 source = s;
1020 dest = d;
1021
1022
1023 if (!node_isset(dest, tmp))
1024 break;
1025 }
1026 if (source == -1)
1027 break;
1028
1029 node_clear(source, tmp);
1030 err = migrate_to_node(mm, source, dest, flags);
1031 if (err > 0)
1032 busy += err;
1033 if (err < 0)
1034 break;
1035 }
1036out:
1037 up_read(&mm->mmap_sem);
1038 if (err < 0)
1039 return err;
1040 return busy;
1041
1042}
1043
1044
1045
1046
1047
1048
1049
1050
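/*
 * Allocate a new page for page migration according to the vma policy at the
 * page's address.  @private is the first vma of the mbind range; search
 * forward from there for the vma that maps the page.
 */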
1051static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1052{
1053 struct vm_area_struct *vma = (struct vm_area_struct *)private;
1054 unsigned long uninitialized_var(address);
1055
1056 while (vma) {
1057 address = page_address_in_vma(page, vma);
1058 if (address != -EFAULT)
1059 break;
1060 vma = vma->vm_next;
1061 }
1062
1063
1064
1065
1066 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1067}
1068#else
1069
1070static void migrate_page_add(struct page *page, struct list_head *pagelist,
1071 unsigned long flags)
1072{
1073}
1074
1075int do_migrate_pages(struct mm_struct *mm,
1076 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1077{
1078 return -ENOSYS;
1079}
1080
1081static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1082{
1083 return NULL;
1084}
1085#endif
1086
1087static long do_mbind(unsigned long start, unsigned long len,
1088 unsigned short mode, unsigned short mode_flags,
1089 nodemask_t *nmask, unsigned long flags)
1090{
1091 struct vm_area_struct *vma;
1092 struct mm_struct *mm = current->mm;
1093 struct mempolicy *new;
1094 unsigned long end;
1095 int err;
1096 LIST_HEAD(pagelist);
1097
1098 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1099 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1100 return -EINVAL;
1101 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1102 return -EPERM;
1103
1104 if (start & ~PAGE_MASK)
1105 return -EINVAL;
1106
1107 if (mode == MPOL_DEFAULT)
1108 flags &= ~MPOL_MF_STRICT;
1109
1110 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1111 end = start + len;
1112
1113 if (end < start)
1114 return -EINVAL;
1115 if (end == start)
1116 return 0;
1117
1118 new = mpol_new(mode, mode_flags, nmask);
1119 if (IS_ERR(new))
1120 return PTR_ERR(new);
1121
1122
1123
1124
1125
1126 if (!new)
1127 flags |= MPOL_MF_DISCONTIG_OK;
1128
1129 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1130 start, start + len, mode, mode_flags,
1131 nmask ? nodes_addr(*nmask)[0] : -1);
1132
1133 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1134
1135 err = migrate_prep();
1136 if (err)
1137 goto mpol_out;
1138 }
1139 {
1140 NODEMASK_SCRATCH(scratch);
1141 if (scratch) {
1142 down_write(&mm->mmap_sem);
1143 task_lock(current);
1144 err = mpol_set_nodemask(new, nmask, scratch);
1145 task_unlock(current);
1146 if (err)
1147 up_write(&mm->mmap_sem);
1148 } else
1149 err = -ENOMEM;
1150 NODEMASK_SCRATCH_FREE(scratch);
1151 }
1152 if (err)
1153 goto mpol_out;
1154
1155 vma = check_range(mm, start, end, nmask,
1156 flags | MPOL_MF_INVERT, &pagelist);
1157
1158 err = PTR_ERR(vma);
1159 if (!IS_ERR(vma)) {
1160 int nr_failed = 0;
1161
1162 err = mbind_range(mm, start, end, new);
1163
1164 if (!list_empty(&pagelist)) {
1165 nr_failed = migrate_pages(&pagelist, new_vma_page,
1166 (unsigned long)vma,
1167 false, true);
1168 if (nr_failed)
1169 putback_lru_pages(&pagelist);
1170 }
1171
1172 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1173 err = -EIO;
1174 } else
1175 putback_lru_pages(&pagelist);
1176
1177 up_write(&mm->mmap_sem);
1178 mpol_out:
1179 mpol_put(new);
1180 return err;
1181}
1182
1183
1184
1185
1186
1187
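/*
 * User-space interface with variable-sized bitmaps for nodelists.
 *
 * get_nodes() copies a node mask from user space, rejecting masks that set
 * bits above the supported node range.
 */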
1188static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1189 unsigned long maxnode)
1190{
1191 unsigned long k;
1192 unsigned long nlongs;
1193 unsigned long endmask;
1194
1195 --maxnode;
1196 nodes_clear(*nodes);
1197 if (maxnode == 0 || !nmask)
1198 return 0;
1199 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1200 return -EINVAL;
1201
1202 nlongs = BITS_TO_LONGS(maxnode);
1203 if ((maxnode % BITS_PER_LONG) == 0)
1204 endmask = ~0UL;
1205 else
1206 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1207
1208
1209
1210 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1211 if (nlongs > PAGE_SIZE/sizeof(long))
1212 return -EINVAL;
1213 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1214 unsigned long t;
1215 if (get_user(t, nmask + k))
1216 return -EFAULT;
1217 if (k == nlongs - 1) {
1218 if (t & endmask)
1219 return -EINVAL;
1220 } else if (t)
1221 return -EINVAL;
1222 }
1223 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1224 endmask = ~0UL;
1225 }
1226
1227 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1228 return -EFAULT;
1229 nodes_addr(*nodes)[nlongs-1] &= endmask;
1230 return 0;
1231}
1232
1233
1234static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1235 nodemask_t *nodes)
1236{
1237 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1238 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1239
1240 if (copy > nbytes) {
1241 if (copy > PAGE_SIZE)
1242 return -EINVAL;
1243 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1244 return -EFAULT;
1245 copy = nbytes;
1246 }
1247 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1248}
1249
1250SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1251 unsigned long, mode, unsigned long __user *, nmask,
1252 unsigned long, maxnode, unsigned, flags)
1253{
1254 nodemask_t nodes;
1255 int err;
1256 unsigned short mode_flags;
1257
1258 mode_flags = mode & MPOL_MODE_FLAGS;
1259 mode &= ~MPOL_MODE_FLAGS;
1260 if (mode >= MPOL_MAX)
1261 return -EINVAL;
1262 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1263 (mode_flags & MPOL_F_RELATIVE_NODES))
1264 return -EINVAL;
1265 err = get_nodes(&nodes, nmask, maxnode);
1266 if (err)
1267 return err;
1268 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1269}
1270
1271
1272SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1273 unsigned long, maxnode)
1274{
1275 int err;
1276 nodemask_t nodes;
1277 unsigned short flags;
1278
1279 flags = mode & MPOL_MODE_FLAGS;
1280 mode &= ~MPOL_MODE_FLAGS;
1281 if ((unsigned int)mode >= MPOL_MAX)
1282 return -EINVAL;
1283 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1284 return -EINVAL;
1285 err = get_nodes(&nodes, nmask, maxnode);
1286 if (err)
1287 return err;
1288 return do_set_mempolicy(mode, flags, &nodes);
1289}
1290
1291SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1292 const unsigned long __user *, old_nodes,
1293 const unsigned long __user *, new_nodes)
1294{
1295 const struct cred *cred = current_cred(), *tcred;
1296 struct mm_struct *mm = NULL;
1297 struct task_struct *task;
1298 nodemask_t task_nodes;
1299 int err;
1300 nodemask_t *old;
1301 nodemask_t *new;
1302 NODEMASK_SCRATCH(scratch);
1303
1304 if (!scratch)
1305 return -ENOMEM;
1306
1307 old = &scratch->mask1;
1308 new = &scratch->mask2;
1309
1310 err = get_nodes(old, old_nodes, maxnode);
1311 if (err)
1312 goto out;
1313
1314 err = get_nodes(new, new_nodes, maxnode);
1315 if (err)
1316 goto out;
1317
1318
1319 rcu_read_lock();
1320 task = pid ? find_task_by_vpid(pid) : current;
1321 if (!task) {
1322 rcu_read_unlock();
1323 err = -ESRCH;
1324 goto out;
1325 }
1326 mm = get_task_mm(task);
1327 rcu_read_unlock();
1328
1329 err = -EINVAL;
1330 if (!mm)
1331 goto out;
1332
1333
1334
1335
1336
1337
1338
1339 rcu_read_lock();
1340 tcred = __task_cred(task);
1341 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1342 cred->uid != tcred->suid && cred->uid != tcred->uid &&
1343 !capable(CAP_SYS_NICE)) {
1344 rcu_read_unlock();
1345 err = -EPERM;
1346 goto out;
1347 }
1348 rcu_read_unlock();
1349
1350 task_nodes = cpuset_mems_allowed(task);
1351
1352 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1353 err = -EPERM;
1354 goto out;
1355 }
1356
1357 if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1358 err = -EINVAL;
1359 goto out;
1360 }
1361
1362 err = security_task_movememory(task);
1363 if (err)
1364 goto out;
1365
1366 err = do_migrate_pages(mm, old, new,
1367 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1368out:
1369 if (mm)
1370 mmput(mm);
1371 NODEMASK_SCRATCH_FREE(scratch);
1372
1373 return err;
1374}
1375
1376
1377
1378SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1379 unsigned long __user *, nmask, unsigned long, maxnode,
1380 unsigned long, addr, unsigned long, flags)
1381{
1382 int err;
1383 int uninitialized_var(pval);
1384 nodemask_t nodes;
1385
1386 if (nmask != NULL && maxnode < MAX_NUMNODES)
1387 return -EINVAL;
1388
1389 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1390
1391 if (err)
1392 return err;
1393
1394 if (policy && put_user(pval, policy))
1395 return -EFAULT;
1396
1397 if (nmask)
1398 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1399
1400 return err;
1401}
1402
1403#ifdef CONFIG_COMPAT
1404
1405asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1406 compat_ulong_t __user *nmask,
1407 compat_ulong_t maxnode,
1408 compat_ulong_t addr, compat_ulong_t flags)
1409{
1410 long err;
1411 unsigned long __user *nm = NULL;
1412 unsigned long nr_bits, alloc_size;
1413 DECLARE_BITMAP(bm, MAX_NUMNODES);
1414
1415 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1416 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1417
1418 if (nmask)
1419 nm = compat_alloc_user_space(alloc_size);
1420
1421 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1422
1423 if (!err && nmask) {
1424 unsigned long copy_size;
1425 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1426 err = copy_from_user(bm, nm, copy_size);
1427
1428 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1429 err |= compat_put_bitmap(nmask, bm, nr_bits);
1430 }
1431
1432 return err;
1433}
1434
1435asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1436 compat_ulong_t maxnode)
1437{
1438 long err = 0;
1439 unsigned long __user *nm = NULL;
1440 unsigned long nr_bits, alloc_size;
1441 DECLARE_BITMAP(bm, MAX_NUMNODES);
1442
1443 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1444 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1445
1446 if (nmask) {
1447 err = compat_get_bitmap(bm, nmask, nr_bits);
1448 nm = compat_alloc_user_space(alloc_size);
1449 err |= copy_to_user(nm, bm, alloc_size);
1450 }
1451
1452 if (err)
1453 return -EFAULT;
1454
1455 return sys_set_mempolicy(mode, nm, nr_bits+1);
1456}
1457
1458asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1459 compat_ulong_t mode, compat_ulong_t __user *nmask,
1460 compat_ulong_t maxnode, compat_ulong_t flags)
1461{
1462 long err = 0;
1463 unsigned long __user *nm = NULL;
1464 unsigned long nr_bits, alloc_size;
1465 nodemask_t bm;
1466
1467 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1468 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1469
1470 if (nmask) {
1471 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1472 nm = compat_alloc_user_space(alloc_size);
1473 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1474 }
1475
1476 if (err)
1477 return -EFAULT;
1478
1479 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1480}
1481
1482#endif
1483
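/*
 * get_vma_policy(@task, @vma, @addr)
 *
 * Returns the effective policy for a VMA at the given address, falling back
 * to @task's policy and then to the system default policy.  A shared policy
 * returned by a vma's get_policy() op carries an extra reference which the
 * caller must drop, e.g. with mpol_cond_put().
 */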
1500struct mempolicy *get_vma_policy(struct task_struct *task,
1501 struct vm_area_struct *vma, unsigned long addr)
1502{
1503 struct mempolicy *pol = task->mempolicy;
1504
1505 if (vma) {
1506 if (vma->vm_ops && vma->vm_ops->get_policy) {
1507 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1508 addr);
1509 if (vpol)
1510 pol = vpol;
1511 } else if (vma->vm_policy)
1512 pol = vma->vm_policy;
1513 }
1514 if (!pol)
1515 pol = &default_policy;
1516 return pol;
1517}
1518
1519
1520
1521
1522
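/*
 * Return the nodemask to pass to the page allocator for this policy, or
 * NULL when no nodemask filtering is needed; only MPOL_BIND ever supplies
 * one, and only for zones at or above policy_zone.
 */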
1523static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1524{
1525
1526 if (unlikely(policy->mode == MPOL_BIND) &&
1527 gfp_zone(gfp) >= policy_zone &&
1528 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1529 return &policy->v.nodes;
1530
1531 return NULL;
1532}
1533
1534
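/* Return the zonelist indicated by gfp for the node representing a policy */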
1535static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1536 int nd)
1537{
1538 switch (policy->mode) {
1539 case MPOL_PREFERRED:
1540 if (!(policy->flags & MPOL_F_LOCAL))
1541 nd = policy->v.preferred_node;
1542 break;
1543 case MPOL_BIND:
1544
1545
1546
1547
1548
1549
1550 if (unlikely(gfp & __GFP_THISNODE) &&
1551 unlikely(!node_isset(nd, policy->v.nodes)))
1552 nd = first_node(policy->v.nodes);
1553 break;
1554 default:
1555 BUG();
1556 }
1557 return node_zonelist(nd, gfp);
1558}
1559
1560
1561static unsigned interleave_nodes(struct mempolicy *policy)
1562{
1563 unsigned nid, next;
1564 struct task_struct *me = current;
1565
1566 nid = me->il_next;
1567 next = next_node(nid, policy->v.nodes);
1568 if (next >= MAX_NUMNODES)
1569 next = first_node(policy->v.nodes);
1570 if (next < MAX_NUMNODES)
1571 me->il_next = next;
1572 return nid;
1573}
1574
1575
1576
1577
1578
1579
1580
1581
1582
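/*
 * Depending on the memory policy, provide a node from which to allocate
 * the next slab entry.  The caller must ensure @policy cannot be freed
 * while this runs; for current->mempolicy that is implicit.
 */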
1583unsigned slab_node(struct mempolicy *policy)
1584{
1585 if (!policy || policy->flags & MPOL_F_LOCAL)
1586 return numa_node_id();
1587
1588 switch (policy->mode) {
1589 case MPOL_PREFERRED:
1590
1591
1592
1593 return policy->v.preferred_node;
1594
1595 case MPOL_INTERLEAVE:
1596 return interleave_nodes(policy);
1597
1598 case MPOL_BIND: {
1599
1600
1601
1602
1603 struct zonelist *zonelist;
1604 struct zone *zone;
1605 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1606 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1607 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1608 &policy->v.nodes,
1609 &zone);
1610 return zone ? zone->node : numa_node_id();
1611 }
1612
1613 default:
1614 BUG();
1615 }
1616}
1617
1618
1619static unsigned offset_il_node(struct mempolicy *pol,
1620 struct vm_area_struct *vma, unsigned long off)
1621{
1622 unsigned nnodes = nodes_weight(pol->v.nodes);
1623 unsigned target;
1624 int c;
1625 int nid = -1;
1626
1627 if (!nnodes)
1628 return numa_node_id();
1629 target = (unsigned int)off % nnodes;
1630 c = 0;
1631 do {
1632 nid = next_node(nid, pol->v.nodes);
1633 c++;
1634 } while (c <= target);
1635 return nid;
1636}
1637
1638
1639static inline unsigned interleave_nid(struct mempolicy *pol,
1640 struct vm_area_struct *vma, unsigned long addr, int shift)
1641{
1642 if (vma) {
1643 unsigned long off;
1644
1645
1646
1647
1648
1649
1650
1651
1652 BUG_ON(shift < PAGE_SHIFT);
1653 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1654 off += (addr - vma->vm_start) >> shift;
1655 return offset_il_node(pol, vma, off);
1656 } else
1657 return interleave_nodes(pol);
1658}
1659
1660
1661
1662
1663
1664int node_random(const nodemask_t *maskp)
1665{
1666 int w, bit = -1;
1667
1668 w = nodes_weight(*maskp);
1669 if (w)
1670 bit = bitmap_ord_to_pos(maskp->bits,
1671 get_random_int() % w, MAX_NUMNODES);
1672 return bit;
1673}
1674
1675#ifdef CONFIG_HUGETLBFS
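/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
 *
 * Returns the zonelist to use for a huge page allocation at @addr.
 * *@mpol is set to the effective policy; if it is a shared policy the
 * caller is expected to drop the reference (mpol_cond_put()).  *@nodemask
 * is set to the MPOL_BIND nodemask, or NULL for other policies.
 */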
1691struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1692 gfp_t gfp_flags, struct mempolicy **mpol,
1693 nodemask_t **nodemask)
1694{
1695 struct zonelist *zl;
1696
1697 *mpol = get_vma_policy(current, vma, addr);
1698 *nodemask = NULL;
1699
1700 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1701 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1702 huge_page_shift(hstate_vma(vma))), gfp_flags);
1703 } else {
1704 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1705 if ((*mpol)->mode == MPOL_BIND)
1706 *nodemask = &(*mpol)->v.nodes;
1707 }
1708 return zl;
1709}
1710
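/*
 * init_nodemask_of_mempolicy()
 *
 * Returns false if the current task has no mempolicy (or @mask is NULL).
 * Otherwise initializes @mask with the node(s) the policy allocates from
 * and returns true.  Takes task_lock() against a concurrent policy change.
 */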
1727bool init_nodemask_of_mempolicy(nodemask_t *mask)
1728{
1729 struct mempolicy *mempolicy;
1730 int nid;
1731
1732 if (!(mask && current->mempolicy))
1733 return false;
1734
1735 task_lock(current);
1736 mempolicy = current->mempolicy;
1737 switch (mempolicy->mode) {
1738 case MPOL_PREFERRED:
1739 if (mempolicy->flags & MPOL_F_LOCAL)
1740 nid = numa_node_id();
1741 else
1742 nid = mempolicy->v.preferred_node;
1743 init_nodemask_of_node(mask, nid);
1744 break;
1745
1746 case MPOL_BIND:
1747
1748 case MPOL_INTERLEAVE:
1749 *mask = mempolicy->v.nodes;
1750 break;
1751
1752 default:
1753 BUG();
1754 }
1755 task_unlock(current);
1756
1757 return true;
1758}
1759#endif
1760
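/*
 * mempolicy_nodemask_intersects()
 *
 * Returns true if @tsk has no mempolicy.  For MPOL_BIND and MPOL_INTERLEAVE,
 * returns whether the policy's nodes intersect @mask.  MPOL_PREFERRED always
 * returns true, since the task may still allocate elsewhere on fallback.
 */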
1771bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1772 const nodemask_t *mask)
1773{
1774 struct mempolicy *mempolicy;
1775 bool ret = true;
1776
1777 if (!mask)
1778 return ret;
1779 task_lock(tsk);
1780 mempolicy = tsk->mempolicy;
1781 if (!mempolicy)
1782 goto out;
1783
1784 switch (mempolicy->mode) {
1785 case MPOL_PREFERRED:
1786
1787
1788
1789
1790
1791
1792 break;
1793 case MPOL_BIND:
1794 case MPOL_INTERLEAVE:
1795 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1796 break;
1797 default:
1798 BUG();
1799 }
1800out:
1801 task_unlock(tsk);
1802 return ret;
1803}
1804
1805
1806
1807static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1808 unsigned nid)
1809{
1810 struct zonelist *zl;
1811 struct page *page;
1812
1813 zl = node_zonelist(nid, gfp);
1814 page = __alloc_pages(gfp, order, zl);
1815 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1816 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1817 return page;
1818}
1819
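/**
 * alloc_pages_vma - Allocate a page for a VMA.
 * @gfp: GFP flags for the allocation
 * @order: order of the allocation
 * @vma: vma the page will be mapped into, or NULL
 * @addr: virtual address in @vma, used for interleaving and shared policy
 *        lookup
 * @node: node to use when the policy does not dictate one
 *
 * Allocates a page according to the NUMA policy of @vma or, failing that,
 * of the current task.  When @vma is non-NULL the caller must hold the
 * mmap_sem of its mm for read.  Returns NULL when no page can be allocated.
 */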
1843struct page *
1844alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1845 unsigned long addr, int node)
1846{
1847 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1848 struct zonelist *zl;
1849 struct page *page;
1850
1851 get_mems_allowed();
1852 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1853 unsigned nid;
1854
1855 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1856 mpol_cond_put(pol);
1857 page = alloc_page_interleave(gfp, order, nid);
1858 put_mems_allowed();
1859 return page;
1860 }
1861 zl = policy_zonelist(gfp, pol, node);
1862 if (unlikely(mpol_needs_cond_ref(pol))) {
1863
1864
1865
1866 struct page *page = __alloc_pages_nodemask(gfp, order,
1867 zl, policy_nodemask(gfp, pol));
1868 __mpol_put(pol);
1869 put_mems_allowed();
1870 return page;
1871 }
1872
1873
1874
1875 page = __alloc_pages_nodemask(gfp, order, zl,
1876 policy_nodemask(gfp, pol));
1877 put_mems_allowed();
1878 return page;
1879}
1880
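/**
 * alloc_pages_current - Allocate pages following the current task's policy.
 * @gfp: GFP flags for the allocation
 * @order: power-of-two order of the allocation
 *
 * Allocates 2^@order contiguous pages using the process memory policy,
 * falling back to the system default policy in interrupt context or when
 * __GFP_THISNODE is set.  Returns NULL when no page can be allocated.
 */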
1900struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1901{
1902 struct mempolicy *pol = current->mempolicy;
1903 struct page *page;
1904
1905 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1906 pol = &default_policy;
1907
1908 get_mems_allowed();
1909
1910
1911
1912
1913 if (pol->mode == MPOL_INTERLEAVE)
1914 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1915 else
1916 page = __alloc_pages_nodemask(gfp, order,
1917 policy_zonelist(gfp, pol, numa_node_id()),
1918 policy_nodemask(gfp, pol));
1919 put_mems_allowed();
1920 return page;
1921}
1922EXPORT_SYMBOL(alloc_pages_current);
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936struct mempolicy *__mpol_dup(struct mempolicy *old)
1937{
1938 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1939
1940 if (!new)
1941 return ERR_PTR(-ENOMEM);
1942
1943
1944 if (old == current->mempolicy) {
1945 task_lock(current);
1946 *new = *old;
1947 task_unlock(current);
1948 } else
1949 *new = *old;
1950
1951 rcu_read_lock();
1952 if (current_cpuset_is_being_rebound()) {
1953 nodemask_t mems = cpuset_mems_allowed(current);
1954 if (new->flags & MPOL_F_REBINDING)
1955 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1956 else
1957 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1958 }
1959 rcu_read_unlock();
1960 atomic_set(&new->refcnt, 1);
1961 return new;
1962}
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1975 struct mempolicy *frompol)
1976{
1977 if (!mpol_needs_cond_ref(frompol))
1978 return frompol;
1979
1980 *tompol = *frompol;
1981 tompol->flags &= ~MPOL_F_SHARED;
1982 __mpol_put(frompol);
1983 return tompol;
1984}
1985
1986
1987bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1988{
1989 if (!a || !b)
1990 return false;
1991 if (a->mode != b->mode)
1992 return false;
1993 if (a->flags != b->flags)
1994 return false;
1995 if (mpol_store_user_nodemask(a))
1996 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1997 return false;
1998
1999 switch (a->mode) {
2000 case MPOL_BIND:
2001
2002 case MPOL_INTERLEAVE:
2003 return !!nodes_equal(a->v.nodes, b->v.nodes);
2004 case MPOL_PREFERRED:
2005 return a->v.preferred_node == b->v.preferred_node;
2006 default:
2007 BUG();
2008 return false;
2009 }
2010}
2011
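/*
 * Shared memory backing store policy support.
 *
 * Per-range policies (e.g. for tmpfs objects) are kept in a red-black tree
 * of sp_node entries linked from the shared_policy and protected by
 * sp->lock.  sp_lookup() returns the first node overlapping [start, end).
 */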
2023static struct sp_node *
2024sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2025{
2026 struct rb_node *n = sp->root.rb_node;
2027
2028 while (n) {
2029 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2030
2031 if (start >= p->end)
2032 n = n->rb_right;
2033 else if (end <= p->start)
2034 n = n->rb_left;
2035 else
2036 break;
2037 }
2038 if (!n)
2039 return NULL;
2040 for (;;) {
2041 struct sp_node *w = NULL;
2042 struct rb_node *prev = rb_prev(n);
2043 if (!prev)
2044 break;
2045 w = rb_entry(prev, struct sp_node, nd);
2046 if (w->end <= start)
2047 break;
2048 n = prev;
2049 }
2050 return rb_entry(n, struct sp_node, nd);
2051}
2052
2053
2054
2055static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2056{
2057 struct rb_node **p = &sp->root.rb_node;
2058 struct rb_node *parent = NULL;
2059 struct sp_node *nd;
2060
2061 while (*p) {
2062 parent = *p;
2063 nd = rb_entry(parent, struct sp_node, nd);
2064 if (new->start < nd->start)
2065 p = &(*p)->rb_left;
2066 else if (new->end > nd->end)
2067 p = &(*p)->rb_right;
2068 else
2069 BUG();
2070 }
2071 rb_link_node(&new->nd, parent, p);
2072 rb_insert_color(&new->nd, &sp->root);
2073 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2074 new->policy ? new->policy->mode : 0);
2075}
2076
2077
2078struct mempolicy *
2079mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2080{
2081 struct mempolicy *pol = NULL;
2082 struct sp_node *sn;
2083
2084 if (!sp->root.rb_node)
2085 return NULL;
2086 spin_lock(&sp->lock);
2087 sn = sp_lookup(sp, idx, idx+1);
2088 if (sn) {
2089 mpol_get(sn->policy);
2090 pol = sn->policy;
2091 }
2092 spin_unlock(&sp->lock);
2093 return pol;
2094}
2095
2096static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2097{
2098 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2099 rb_erase(&n->nd, &sp->root);
2100 mpol_put(n->policy);
2101 kmem_cache_free(sn_cache, n);
2102}
2103
2104static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2105 struct mempolicy *pol)
2106{
2107 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2108
2109 if (!n)
2110 return NULL;
2111 n->start = start;
2112 n->end = end;
2113 mpol_get(pol);
2114 pol->flags |= MPOL_F_SHARED;
2115 n->policy = pol;
2116 return n;
2117}
2118
2119
2120static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2121 unsigned long end, struct sp_node *new)
2122{
2123 struct sp_node *n, *new2 = NULL;
2124
2125restart:
2126 spin_lock(&sp->lock);
2127 n = sp_lookup(sp, start, end);
2128
2129 while (n && n->start < end) {
2130 struct rb_node *next = rb_next(&n->nd);
2131 if (n->start >= start) {
2132 if (n->end <= end)
2133 sp_delete(sp, n);
2134 else
2135 n->start = end;
2136 } else {
2137
2138 if (n->end > end) {
2139 if (!new2) {
2140 spin_unlock(&sp->lock);
2141 new2 = sp_alloc(end, n->end, n->policy);
2142 if (!new2)
2143 return -ENOMEM;
2144 goto restart;
2145 }
2146 n->end = start;
2147 sp_insert(sp, new2);
2148 new2 = NULL;
2149 break;
2150 } else
2151 n->end = start;
2152 }
2153 if (!next)
2154 break;
2155 n = rb_entry(next, struct sp_node, nd);
2156 }
2157 if (new)
2158 sp_insert(sp, new);
2159 spin_unlock(&sp->lock);
2160 if (new2) {
2161 mpol_put(new2->policy);
2162 kmem_cache_free(sn_cache, new2);
2163 }
2164 return 0;
2165}
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2178{
2179 int ret;
2180
2181 sp->root = RB_ROOT;
2182 spin_lock_init(&sp->lock);
2183
2184 if (mpol) {
2185 struct vm_area_struct pvma;
2186 struct mempolicy *new;
2187 NODEMASK_SCRATCH(scratch);
2188
2189 if (!scratch)
2190 goto put_mpol;
2191
2192 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2193 if (IS_ERR(new))
2194 goto free_scratch;
2195
2196 task_lock(current);
2197 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2198 task_unlock(current);
2199 if (ret)
2200 goto put_new;
2201
2202
2203 memset(&pvma, 0, sizeof(struct vm_area_struct));
2204 pvma.vm_end = TASK_SIZE;
2205 mpol_set_shared_policy(sp, &pvma, new);
2206
2207put_new:
2208 mpol_put(new);
2209free_scratch:
2210 NODEMASK_SCRATCH_FREE(scratch);
2211put_mpol:
2212 mpol_put(mpol);
2213 }
2214}
2215
2216int mpol_set_shared_policy(struct shared_policy *info,
2217 struct vm_area_struct *vma, struct mempolicy *npol)
2218{
2219 int err;
2220 struct sp_node *new = NULL;
2221 unsigned long sz = vma_pages(vma);
2222
2223 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2224 vma->vm_pgoff,
2225 sz, npol ? npol->mode : -1,
2226 npol ? npol->flags : -1,
2227 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2228
2229 if (npol) {
2230 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2231 if (!new)
2232 return -ENOMEM;
2233 }
2234 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2235 if (err && new)
2236 kmem_cache_free(sn_cache, new);
2237 return err;
2238}
2239
2240
2241void mpol_free_shared_policy(struct shared_policy *p)
2242{
2243 struct sp_node *n;
2244 struct rb_node *next;
2245
2246 if (!p->root.rb_node)
2247 return;
2248 spin_lock(&p->lock);
2249 next = rb_first(&p->root);
2250 while (next) {
2251 n = rb_entry(next, struct sp_node, nd);
2252 next = rb_next(&n->nd);
2253 rb_erase(&n->nd, &p->root);
2254 mpol_put(n->policy);
2255 kmem_cache_free(sn_cache, n);
2256 }
2257 spin_unlock(&p->lock);
2258}
2259
2260
2261void __init numa_policy_init(void)
2262{
2263 nodemask_t interleave_nodes;
2264 unsigned long largest = 0;
2265 int nid, prefer = 0;
2266
2267 policy_cache = kmem_cache_create("numa_policy",
2268 sizeof(struct mempolicy),
2269 0, SLAB_PANIC, NULL);
2270
2271 sn_cache = kmem_cache_create("shared_policy_node",
2272 sizeof(struct sp_node),
2273 0, SLAB_PANIC, NULL);
2274
2275
2276
2277
2278
2279
2280 nodes_clear(interleave_nodes);
2281 for_each_node_state(nid, N_HIGH_MEMORY) {
2282 unsigned long total_pages = node_present_pages(nid);
2283
2284
2285 if (largest < total_pages) {
2286 largest = total_pages;
2287 prefer = nid;
2288 }
2289
2290
2291 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2292 node_set(nid, interleave_nodes);
2293 }
2294
2295
2296 if (unlikely(nodes_empty(interleave_nodes)))
2297 node_set(prefer, interleave_nodes);
2298
2299 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2300 printk("numa_policy_init: interleaving failed\n");
2301}
2302
2303
2304void numa_default_policy(void)
2305{
2306 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2307}
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317#define MPOL_LOCAL MPOL_MAX
2318static const char * const policy_modes[] =
2319{
2320 [MPOL_DEFAULT] = "default",
2321 [MPOL_PREFERRED] = "prefer",
2322 [MPOL_BIND] = "bind",
2323 [MPOL_INTERLEAVE] = "interleave",
2324 [MPOL_LOCAL] = "local"
2325};
2326
2327
2328#ifdef CONFIG_TMPFS
2329
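/**
 * mpol_parse_str - parse a mempolicy string (e.g. a tmpfs "mpol=" mount
 *                  option) into a mempolicy
 * @str: string to parse; modified in place while parsing and then restored
 * @mpol: pointer through which the new mempolicy is returned on success
 * @no_context: if true, keep the raw nodemask in w.user_nodemask instead of
 *              contextualizing it against the current cpuset
 *
 * Input format: <mode>[=<flags>][:<nodelist>], for example
 * "interleave:0-3", "bind=static:1,3", "prefer:2", "local" or "default".
 *
 * Returns 0 on success, 1 on failure.
 */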
2347int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2348{
2349 struct mempolicy *new = NULL;
2350 unsigned short mode;
2351 unsigned short uninitialized_var(mode_flags);
2352 nodemask_t nodes;
2353 char *nodelist = strchr(str, ':');
2354 char *flags = strchr(str, '=');
2355 int err = 1;
2356
2357 if (nodelist) {
2358
2359 *nodelist++ = '\0';
2360 if (nodelist_parse(nodelist, nodes))
2361 goto out;
2362 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2363 goto out;
2364 } else
2365 nodes_clear(nodes);
2366
2367 if (flags)
2368 *flags++ = '\0';
2369
2370 for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2371 if (!strcmp(str, policy_modes[mode])) {
2372 break;
2373 }
2374 }
2375 if (mode > MPOL_LOCAL)
2376 goto out;
2377
2378 switch (mode) {
2379 case MPOL_PREFERRED:
2380
2381
2382
2383 if (nodelist) {
2384 char *rest = nodelist;
2385 while (isdigit(*rest))
2386 rest++;
2387 if (*rest)
2388 goto out;
2389 }
2390 break;
2391 case MPOL_INTERLEAVE:
2392
2393
2394
2395 if (!nodelist)
2396 nodes = node_states[N_HIGH_MEMORY];
2397 break;
2398 case MPOL_LOCAL:
2399
2400
2401
2402 if (nodelist)
2403 goto out;
2404 mode = MPOL_PREFERRED;
2405 break;
2406 case MPOL_DEFAULT:
2407
2408
2409
2410 if (!nodelist)
2411 err = 0;
2412 goto out;
2413 case MPOL_BIND:
2414
2415
2416
2417 if (!nodelist)
2418 goto out;
2419 }
2420
2421 mode_flags = 0;
2422 if (flags) {
2423
2424
2425
2426
2427 if (!strcmp(flags, "static"))
2428 mode_flags |= MPOL_F_STATIC_NODES;
2429 else if (!strcmp(flags, "relative"))
2430 mode_flags |= MPOL_F_RELATIVE_NODES;
2431 else
2432 goto out;
2433 }
2434
2435 new = mpol_new(mode, mode_flags, &nodes);
2436 if (IS_ERR(new))
2437 goto out;
2438
2439 if (no_context) {
2440
2441 new->w.user_nodemask = nodes;
2442 } else {
2443 int ret;
2444 NODEMASK_SCRATCH(scratch);
2445 if (scratch) {
2446 task_lock(current);
2447 ret = mpol_set_nodemask(new, &nodes, scratch);
2448 task_unlock(current);
2449 } else
2450 ret = -ENOMEM;
2451 NODEMASK_SCRATCH_FREE(scratch);
2452 if (ret) {
2453 mpol_put(new);
2454 goto out;
2455 }
2456 }
2457 err = 0;
2458
2459out:
2460
2461 if (nodelist)
2462 *--nodelist = ':';
2463 if (flags)
2464 *--flags = '=';
2465 if (!err)
2466 *mpol = new;
2467 return err;
2468}
2469#endif
2470
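/*
 * Convert a mempolicy into a string in the same format that
 * mpol_parse_str() accepts.  Returns the number of characters written to
 * @buffer, or a negative error (-ENOSPC) if @maxlen is too small.
 */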
2482int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2483{
2484 char *p = buffer;
2485 int l;
2486 nodemask_t nodes;
2487 unsigned short mode;
2488 unsigned short flags = pol ? pol->flags : 0;
2489
2490
2491
2492
2493 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2494
2495 if (!pol || pol == &default_policy)
2496 mode = MPOL_DEFAULT;
2497 else
2498 mode = pol->mode;
2499
2500 switch (mode) {
2501 case MPOL_DEFAULT:
2502 nodes_clear(nodes);
2503 break;
2504
2505 case MPOL_PREFERRED:
2506 nodes_clear(nodes);
2507 if (flags & MPOL_F_LOCAL)
2508 mode = MPOL_LOCAL;
2509 else
2510 node_set(pol->v.preferred_node, nodes);
2511 break;
2512
2513 case MPOL_BIND:
2514
2515 case MPOL_INTERLEAVE:
2516 if (no_context)
2517 nodes = pol->w.user_nodemask;
2518 else
2519 nodes = pol->v.nodes;
2520 break;
2521
2522 default:
2523 BUG();
2524 }
2525
2526 l = strlen(policy_modes[mode]);
2527 if (buffer + maxlen < p + l + 1)
2528 return -ENOSPC;
2529
2530 strcpy(p, policy_modes[mode]);
2531 p += l;
2532
2533 if (flags & MPOL_MODE_FLAGS) {
2534 if (buffer + maxlen < p + 2)
2535 return -ENOSPC;
2536 *p++ = '=';
2537
2538
2539
2540
2541 if (flags & MPOL_F_STATIC_NODES)
2542 p += snprintf(p, buffer + maxlen - p, "static");
2543 else if (flags & MPOL_F_RELATIVE_NODES)
2544 p += snprintf(p, buffer + maxlen - p, "relative");
2545 }
2546
2547 if (!nodes_empty(nodes)) {
2548 if (buffer + maxlen < p + 2)
2549 return -ENOSPC;
2550 *p++ = ':';
2551 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2552 }
2553 return p - buffer;
2554}
2555