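/*
 * Simple NUMA memory policy for the Linux kernel: implements the
 * MPOL_DEFAULT, MPOL_PREFERRED, MPOL_BIND and MPOL_INTERLEAVE policies
 * that control which nodes pages are allocated from, per task and per VMA.
 */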
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;
	else if (nodes_empty(*nodes))
		return -EINVAL;
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

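/*
 * mpol_set_nodemask() contextualizes the policy nodemask against the current
 * cpuset: it intersects the user-supplied nodes with the allowed mems and
 * hands the result to the mode's ->create() method. Must be called while
 * holding task_lock() so that cpuset_current_mems_allowed is stable.
 */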
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	if (pol == NULL)
		return 0;

	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

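/*
 * mpol_new() allocates and initializes a mempolicy for @mode/@flags/@nodes.
 * Returns NULL for MPOL_DEFAULT (meaning "fall back to the surrounding
 * context") or an ERR_PTR() on invalid arguments or allocation failure.
 */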
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

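/*
 * mpol_rebind_policy() migrates a policy to a new set of allowed nodes when
 * the owning cpuset's mems change. With MPOL_REBIND_STEP1/STEP2 the rebind is
 * done in two passes so the nodemask is never observed empty in between; the
 * MPOL_F_REBINDING flag marks a policy that is between the two steps.
 */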
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == 0 &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;

		if (PageReserved(page) || PageKsm(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

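/*
 * check_range() walks the VMAs covering [start, end) and scans their page
 * tables, verifying (or, with MPOL_MF_MOVE*, collecting for migration) pages
 * relative to @nodes. Returns the first VMA of the range or an ERR_PTR().
 */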
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma_prev(mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff, new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

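/* Fill a nodemask with the nodes a policy actually refers to. */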
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

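/*
 * Migrate pages from one node to a target node.  Returns an error code or the
 * number of pages that could not be moved.
 */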
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest, 0);

	return err;
}

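/*
 * Move pages between nodes: for every allowed node in @from_nodes, migrate
 * the task's pages on that node to the corresponding node in @to_nodes.
 */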
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

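	/*
	 * Process source nodes one at a time, preferring a source whose
	 * destination is not itself still pending as a source; this keeps a
	 * node's pages from being overwritten before they have been moved
	 * when the from and to masks overlap.
	 */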
	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;
			dest = d;

			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(mm, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma, 0);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

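/*
 * User-space interface: copy in a variable-sized node bitmap and validate it
 * against MAX_NUMNODES.
 */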
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);

	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

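/*
 * get_vma_policy() returns the effective policy for a VMA at @addr, falling
 * back to the task policy and finally to default_policy.  Shared policies
 * returned by a vm_ops->get_policy() hook come with an extra reference that
 * the caller must drop with mpol_cond_put().
 */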
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
	int nd = numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone->node;
	}

	default:
		BUG();
	}
}

static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS

struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol);
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}

bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

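/*
 * alloc_page_vma() - allocate a page for a VMA, obeying the VMA's (or the
 * current task's) memory policy.  Interleave policies pick the node from the
 * faulting address; other policies build a zonelist and optional nodemask
 * for the page allocator.
 */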
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;
	struct page *page;

	get_mems_allowed();
	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, 0, nid);
		put_mems_allowed();
		return page;
	}
	zl = policy_zonelist(gfp, pol);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		struct page *page = __alloc_pages_nodemask(gfp, 0,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		put_mems_allowed();
		return page;
	}

	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
	put_mems_allowed();
	return page;
}

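/*
 * alloc_pages_current() - allocate pages according to the current task's
 * policy.  In interrupt context, or for __GFP_THISNODE requests, the default
 * policy is used instead.
 */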
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;
	struct page *page;

	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

	get_mems_allowed();

	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
	put_mems_allowed();
	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	rcu_read_lock();
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
		else
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
	}
	rcu_read_unlock();
	atomic_set(&new->refcnt, 1);
	return new;
}

struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!mpol_needs_cond_ref(frompol))
		return frompol;

	*tompol = *frompol;
	tompol->flags &= ~MPOL_F_SHARED;
	__mpol_put(frompol);
	return tompol;
}

int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->mode != b->mode)
		return 0;
	if (a->flags != b->flags)
		return 0;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return 0;

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node &&
			a->flags == b->flags;
	default:
		BUG();
		return 0;
	}
}

static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	pol->flags |= MPOL_F_SHARED;
	n->policy = pol;
	return n;
}

static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);

	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_put(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}

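/*
 * mpol_shared_policy_init() - set up a shared_policy (e.g. for a tmpfs inode)
 * and install @mpol, if any, as the policy for the whole range.  The passed-in
 * @mpol reference is always dropped.
 */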
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;
	spin_lock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;

		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch;

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;
		mpol_set_shared_policy(sp, &pvma, new);

put_new:
		mpol_put(new);
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_put(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

#define MPOL_LOCAL MPOL_MAX
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local"
};

#ifdef CONFIG_TMPFS
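/*
 * mpol_parse_str() - parse a string of the form <mode>[=<flags>][:<nodelist>]
 * into a mempolicy, as used by tmpfs mount options.  On success *mpol is set
 * and 0 is returned; on error the string is restored and 1 is returned.
 */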
int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short uninitialized_var(mode_flags);
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';

	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
		if (!strcmp(str, policy_modes[mode])) {
			break;
		}
	}
	if (mode > MPOL_LOCAL)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		if (!nodelist)
			nodes = node_states[N_HIGH_MEMORY];
		break;
	case MPOL_LOCAL:
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	if (no_context) {
		new->w.user_nodemask = nodes;
	} else {
		int ret;
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			task_lock(current);
			ret = mpol_set_nodemask(new, &nodes, scratch);
			task_unlock(current);
		} else
			ret = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
		if (ret) {
			mpol_put(new);
			goto out;
		}
	}
	err = 0;

out:
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif

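/*
 * mpol_to_str() - format a mempolicy into @buffer as
 * <mode>[=<flags>][:<nodelist>], the inverse of mpol_parse_str().  Returns the
 * number of characters written or -ENOSPC if @maxlen is too small.
 */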
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode;
	unsigned short flags = pol ? pol->flags : 0;

	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;
	else
		mode = pol->mode;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		if (no_context)
			nodes = pol->w.user_nodemask;
		else
			nodes = pol->v.nodes;
		break;

	default:
		BUG();
	}

	l = strlen(policy_modes[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_modes[mode]);
	p += l;

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';

		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = ':';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page) || PageUnevictable(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	for (addr = start; addr < end; addr += sz) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm,
						addr & huge_page_mask(h));
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif

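/* Display a VMA's memory policy and per-node statistics in /proc/<pid>/numa_maps. */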
int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}