/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, April 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

36static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
37{
38 pte_t *pte;
39
40 pte = pte_offset_kernel(pmd, addr);
41 do {
42 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
43 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
44 } while (pte++, addr += PAGE_SIZE, addr != end);
45}
46
47static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
48{
49 pmd_t *pmd;
50 unsigned long next;
51
52 pmd = pmd_offset(pud, addr);
53 do {
54 next = pmd_addr_end(addr, end);
55 if (pmd_none_or_clear_bad(pmd))
56 continue;
57 vunmap_pte_range(pmd, addr, next);
58 } while (pmd++, addr = next, addr != end);
59}
60
61static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
62{
63 pud_t *pud;
64 unsigned long next;
65
66 pud = pud_offset(pgd, addr);
67 do {
68 next = pud_addr_end(addr, end);
69 if (pud_none_or_clear_bad(pud))
70 continue;
71 vunmap_pmd_range(pud, addr, next);
72 } while (pud++, addr = next, addr != end);
73}
74
75static void vunmap_page_range(unsigned long addr, unsigned long end)
76{
77 pgd_t *pgd;
78 unsigned long next;
79
80 BUG_ON(addr >= end);
81 pgd = pgd_offset_k(addr);
82 do {
83 next = pgd_addr_end(addr, end);
84 if (pgd_none_or_clear_bad(pgd))
85 continue;
86 vunmap_pud_range(pgd, addr, next);
87 } while (pgd++, addr = next, addr != end);
88}
89
90static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
91 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
92{
93 pte_t *pte;
94
	/*
	 * nr is a running index into the pages array: it lets higher
	 * level callers keep track of how many pages have been mapped.
	 */
100 pte = pte_alloc_kernel(pmd, addr);
101 if (!pte)
102 return -ENOMEM;
103 do {
104 struct page *page = pages[*nr];
105
106 if (WARN_ON(!pte_none(*pte)))
107 return -EBUSY;
108 if (WARN_ON(!page))
109 return -ENOMEM;
110 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
111 (*nr)++;
112 } while (pte++, addr += PAGE_SIZE, addr != end);
113 return 0;
114}
115
116static int vmap_pmd_range(pud_t *pud, unsigned long addr,
117 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
118{
119 pmd_t *pmd;
120 unsigned long next;
121
122 pmd = pmd_alloc(&init_mm, pud, addr);
123 if (!pmd)
124 return -ENOMEM;
125 do {
126 next = pmd_addr_end(addr, end);
127 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
128 return -ENOMEM;
129 } while (pmd++, addr = next, addr != end);
130 return 0;
131}
132
133static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
134 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
135{
136 pud_t *pud;
137 unsigned long next;
138
139 pud = pud_alloc(&init_mm, pgd, addr);
140 if (!pud)
141 return -ENOMEM;
142 do {
143 next = pud_addr_end(addr, end);
144 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
145 return -ENOMEM;
146 } while (pud++, addr = next, addr != end);
147 return 0;
148}
149
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
156static int vmap_page_range_noflush(unsigned long start, unsigned long end,
157 pgprot_t prot, struct page **pages)
158{
159 pgd_t *pgd;
160 unsigned long next;
161 unsigned long addr = start;
162 int err = 0;
163 int nr = 0;
164
165 BUG_ON(addr >= end);
166 pgd = pgd_offset_k(addr);
167 do {
168 next = pgd_addr_end(addr, end);
169 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
170 if (err)
171 break;
172 } while (pgd++, addr = next, addr != end);
173
174 if (unlikely(err))
175 return err;
176 return nr;
177}
178
179static int vmap_page_range(unsigned long start, unsigned long end,
180 pgprot_t prot, struct page **pages)
181{
182 int ret;
183
184 ret = vmap_page_range_noflush(start, end, prot, pages);
185 flush_cache_vmap(start, end);
186 return ret;
187}
188
189static inline int is_vmalloc_or_module_addr(const void *x)
190{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
196#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
197 unsigned long addr = (unsigned long)x;
198 if (addr >= MODULES_VADDR && addr < MODULES_END)
199 return 1;
200#endif
201 return is_vmalloc_addr(x);
202}
203
/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
207struct page *vmalloc_to_page(const void *vmalloc_addr)
208{
209 unsigned long addr = (unsigned long) vmalloc_addr;
210 struct page *page = NULL;
211 pgd_t *pgd = pgd_offset_k(addr);
212
	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
217 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
218
219 if (!pgd_none(*pgd)) {
220 pud_t *pud = pud_offset(pgd, addr);
221 if (!pud_none(*pud)) {
222 pmd_t *pmd = pmd_offset(pud, addr);
223 if (!pmd_none(*pmd)) {
224 pte_t *ptep, pte;
225
226 ptep = pte_offset_map(pmd, addr);
227 pte = *ptep;
228 if (pte_present(pte))
229 page = pte_page(pte);
230 pte_unmap(ptep);
231 }
232 }
233 }
234 return page;
235}
236EXPORT_SYMBOL(vmalloc_to_page);
237
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
241unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
242{
243 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
244}
245EXPORT_SYMBOL(vmalloc_to_pfn);
246
/*** Global kva allocator ***/

250#define VM_LAZY_FREE 0x01
251#define VM_LAZY_FREEING 0x02
252#define VM_VM_AREA 0x04
253
254struct vmap_area {
255 unsigned long va_start;
256 unsigned long va_end;
257 unsigned long flags;
258 struct rb_node rb_node;
259 struct list_head list;
260 struct list_head purge_list;
261 void *private;
262 struct rcu_head rcu_head;
263};
264
265static DEFINE_SPINLOCK(vmap_area_lock);
266static struct rb_root vmap_area_root = RB_ROOT;
267static LIST_HEAD(vmap_area_list);
268
269static struct vmap_area *__find_vmap_area(unsigned long addr)
270{
271 struct rb_node *n = vmap_area_root.rb_node;
272
273 while (n) {
274 struct vmap_area *va;
275
276 va = rb_entry(n, struct vmap_area, rb_node);
277 if (addr < va->va_start)
278 n = n->rb_left;
279 else if (addr > va->va_start)
280 n = n->rb_right;
281 else
282 return va;
283 }
284
285 return NULL;
286}
287
288static void __insert_vmap_area(struct vmap_area *va)
289{
290 struct rb_node **p = &vmap_area_root.rb_node;
291 struct rb_node *parent = NULL;
292 struct rb_node *tmp;
293
294 while (*p) {
295 struct vmap_area *tmp;
296
297 parent = *p;
298 tmp = rb_entry(parent, struct vmap_area, rb_node);
299 if (va->va_start < tmp->va_end)
300 p = &(*p)->rb_left;
301 else if (va->va_end > tmp->va_start)
302 p = &(*p)->rb_right;
303 else
304 BUG();
305 }
306
307 rb_link_node(&va->rb_node, parent, p);
308 rb_insert_color(&va->rb_node, &vmap_area_root);
309
	/* address-sort this list so it is usable like the vmlist */
311 tmp = rb_prev(&va->rb_node);
312 if (tmp) {
313 struct vmap_area *prev;
314 prev = rb_entry(tmp, struct vmap_area, rb_node);
315 list_add_rcu(&va->list, &prev->list);
316 } else
317 list_add_rcu(&va->list, &vmap_area_list);
318}
319
320static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
326static struct vmap_area *alloc_vmap_area(unsigned long size,
327 unsigned long align,
328 unsigned long vstart, unsigned long vend,
329 int node, gfp_t gfp_mask)
330{
331 struct vmap_area *va;
332 struct rb_node *n;
333 unsigned long addr;
334 int purged = 0;
335
336 BUG_ON(!size);
337 BUG_ON(size & ~PAGE_MASK);
338
339 va = kmalloc_node(sizeof(struct vmap_area),
340 gfp_mask & GFP_RECLAIM_MASK, node);
341 if (unlikely(!va))
342 return ERR_PTR(-ENOMEM);
343
344retry:
345 addr = ALIGN(vstart, align);
346
347 spin_lock(&vmap_area_lock);
348 if (addr + size - 1 < addr)
349 goto overflow;
350
	/* XXX: could have a last_hole cache */
352 n = vmap_area_root.rb_node;
353 if (n) {
354 struct vmap_area *first = NULL;
355
356 do {
357 struct vmap_area *tmp;
358 tmp = rb_entry(n, struct vmap_area, rb_node);
359 if (tmp->va_end >= addr) {
360 if (!first && tmp->va_start < addr + size)
361 first = tmp;
362 n = n->rb_left;
363 } else {
364 first = tmp;
365 n = n->rb_right;
366 }
367 } while (n);
368
369 if (!first)
370 goto found;
371
372 if (first->va_end < addr) {
373 n = rb_next(&first->rb_node);
374 if (n)
375 first = rb_entry(n, struct vmap_area, rb_node);
376 else
377 goto found;
378 }
379
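		/*
		 * Walk the existing areas in address order, bumping addr past
		 * each one, until [addr, addr + size) no longer overlaps the
		 * next area (or we run past vend).
		 */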
380 while (addr + size > first->va_start && addr + size <= vend) {
381 addr = ALIGN(first->va_end + PAGE_SIZE, align);
382 if (addr + size - 1 < addr)
383 goto overflow;
384
385 n = rb_next(&first->rb_node);
386 if (n)
387 first = rb_entry(n, struct vmap_area, rb_node);
388 else
389 goto found;
390 }
391 }
392found:
393 if (addr + size > vend) {
394overflow:
395 spin_unlock(&vmap_area_lock);
396 if (!purged) {
397 purge_vmap_area_lazy();
398 purged = 1;
399 goto retry;
400 }
401 if (printk_ratelimit())
402 printk(KERN_WARNING
403 "vmap allocation for size %lu failed: "
404 "use vmalloc=<size> to increase size.\n", size);
405 kfree(va);
406 return ERR_PTR(-EBUSY);
407 }
408
409 BUG_ON(addr & (align-1));
410
411 va->va_start = addr;
412 va->va_end = addr + size;
413 va->flags = 0;
414 __insert_vmap_area(va);
415 spin_unlock(&vmap_area_lock);
416
417 return va;
418}
419
420static void rcu_free_va(struct rcu_head *head)
421{
422 struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
423
424 kfree(va);
425}
426
427static void __free_vmap_area(struct vmap_area *va)
428{
429 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
430 rb_erase(&va->rb_node, &vmap_area_root);
431 RB_CLEAR_NODE(&va->rb_node);
432 list_del_rcu(&va->list);
433
434 call_rcu(&va->rcu_head, rcu_free_va);
435}
436
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
440static void free_vmap_area(struct vmap_area *va)
441{
442 spin_lock(&vmap_area_lock);
443 __free_vmap_area(va);
444 spin_unlock(&vmap_area_lock);
445}
446
/*
 * Clear the pagetable entries of a given vmap_area
 */
450static void unmap_vmap_area(struct vmap_area *va)
451{
452 vunmap_page_range(va->va_start, va->va_end);
453}
454
455static void vmap_debug_free_range(unsigned long start, unsigned long end)
456{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
470#ifdef CONFIG_DEBUG_PAGEALLOC
471 vunmap_page_range(start, end);
472 flush_tlb_kernel_range(start, end);
473#endif
474}
475
/*
 * lazy_max_pages is the maximum amount of virtual address space we gain by not
 * immediately flushing the TLB, but keeping the mappings around for purging
 * at a later time.
 *
 * There is a tradeoff here: a larger number will cover more kernel page
 * tables and take slightly longer to purge, but it will linearly reduce the
 * number of global TLB flushes that must be performed. It would seem natural
 * to scale this number up linearly with the number of CPUs (because vmapping
 * activity could also scale linearly with the number of CPUs), however it is
 * likely that in practice, workloads might be constrained in other ways that
 * scale differently.
 */
492static unsigned long lazy_max_pages(void)
493{
494 unsigned int log;
495
496 log = fls(num_online_cpus());
497
498 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
499}
500
501static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
502
/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
513static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
514 int sync, int force_flush)
515{
516 static DEFINE_SPINLOCK(purge_lock);
517 LIST_HEAD(valist);
518 struct vmap_area *va;
519 struct vmap_area *n_va;
520 int nr = 0;
521
	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
527 if (!sync && !force_flush) {
528 if (!spin_trylock(&purge_lock))
529 return;
530 } else
531 spin_lock(&purge_lock);
532
533 rcu_read_lock();
534 list_for_each_entry_rcu(va, &vmap_area_list, list) {
535 if (va->flags & VM_LAZY_FREE) {
536 if (va->va_start < *start)
537 *start = va->va_start;
538 if (va->va_end > *end)
539 *end = va->va_end;
540 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
541 unmap_vmap_area(va);
542 list_add_tail(&va->purge_list, &valist);
543 va->flags |= VM_LAZY_FREEING;
544 va->flags &= ~VM_LAZY_FREE;
545 }
546 }
547 rcu_read_unlock();
548
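	/* Drop the pages we are about to free from the lazy-unmap count. */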
549 if (nr) {
550 BUG_ON(nr > atomic_read(&vmap_lazy_nr));
551 atomic_sub(nr, &vmap_lazy_nr);
552 }
553
554 if (nr || force_flush)
555 flush_tlb_kernel_range(*start, *end);
556
557 if (nr) {
558 spin_lock(&vmap_area_lock);
559 list_for_each_entry_safe(va, n_va, &valist, purge_list)
560 __free_vmap_area(va);
561 spin_unlock(&vmap_area_lock);
562 }
563 spin_unlock(&purge_lock);
564}
565
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
570static void try_purge_vmap_area_lazy(void)
571{
572 unsigned long start = ULONG_MAX, end = 0;
573
574 __purge_vmap_area_lazy(&start, &end, 0, 0);
575}
576
/*
 * Kick off a purge of the outstanding lazy areas.
 */
580static void purge_vmap_area_lazy(void)
581{
582 unsigned long start = ULONG_MAX, end = 0;
583
584 __purge_vmap_area_lazy(&start, &end, 1, 0);
585}
586
/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
591static void free_unmap_vmap_area_noflush(struct vmap_area *va)
592{
593 va->flags |= VM_LAZY_FREE;
594 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
595 if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
596 try_purge_vmap_area_lazy();
597}
598
/*
 * Free and unmap a vmap area
 */
602static void free_unmap_vmap_area(struct vmap_area *va)
603{
604 flush_cache_vunmap(va->va_start, va->va_end);
605 free_unmap_vmap_area_noflush(va);
606}
607
608static struct vmap_area *find_vmap_area(unsigned long addr)
609{
610 struct vmap_area *va;
611
612 spin_lock(&vmap_area_lock);
613 va = __find_vmap_area(addr);
614 spin_unlock(&vmap_area_lock);
615
616 return va;
617}
618
619static void free_unmap_vmap_area_addr(unsigned long addr)
620{
621 struct vmap_area *va;
622
623 va = find_vmap_area(addr);
624 BUG_ON(!va);
625 free_unmap_vmap_area(va);
626}
627

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE as (VMALLOC_END - VMALLOC_START). Guess instead
 * (we just need a rough idea).
 */
640#if BITS_PER_LONG == 32
641#define VMALLOC_SPACE (128UL*1024*1024)
642#else
643#define VMALLOC_SPACE (128UL*1024*1024*1024)
644#endif
645
646#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
647#define VMAP_MAX_ALLOC BITS_PER_LONG
648#define VMAP_BBMAP_BITS_MAX 1024
649#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
650#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
651#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))
652#define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
653 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
654 VMALLOC_PAGES / NR_CPUS / 16))
655
656#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
657
658static bool vmap_initialized __read_mostly = false;
659
660struct vmap_block_queue {
661 spinlock_t lock;
662 struct list_head free;
663 struct list_head dirty;
664 unsigned int nr_dirty;
665};
666
667struct vmap_block {
668 spinlock_t lock;
669 struct vmap_area *va;
670 struct vmap_block_queue *vbq;
671 unsigned long free, dirty;
672 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
673 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
674 union {
675 struct list_head free_list;
676 struct rcu_head rcu_head;
677 };
678};
679
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
681static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
682
/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
688static DEFINE_SPINLOCK(vmap_block_tree_lock);
689static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
690
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

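/*
 * Convert a vmalloc address into an index into the vmap_block radix tree.
 * Blocks are VMAP_BLOCK_SIZE aligned, so the index is simply the block
 * number relative to the block-aligned start of the vmalloc area.
 */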
698static unsigned long addr_to_vb_idx(unsigned long addr)
699{
700 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
701 addr /= VMAP_BLOCK_SIZE;
702 return addr;
703}
704
705static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
706{
707 struct vmap_block_queue *vbq;
708 struct vmap_block *vb;
709 struct vmap_area *va;
710 unsigned long vb_idx;
711 int node, err;
712
713 node = numa_node_id();
714
715 vb = kmalloc_node(sizeof(struct vmap_block),
716 gfp_mask & GFP_RECLAIM_MASK, node);
717 if (unlikely(!vb))
718 return ERR_PTR(-ENOMEM);
719
720 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
721 VMALLOC_START, VMALLOC_END,
722 node, gfp_mask);
723 if (unlikely(IS_ERR(va))) {
724 kfree(vb);
725 return ERR_PTR(PTR_ERR(va));
726 }
727
728 err = radix_tree_preload(gfp_mask);
729 if (unlikely(err)) {
730 kfree(vb);
731 free_vmap_area(va);
732 return ERR_PTR(err);
733 }
734
735 spin_lock_init(&vb->lock);
736 vb->va = va;
737 vb->free = VMAP_BBMAP_BITS;
738 vb->dirty = 0;
739 bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
740 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
741 INIT_LIST_HEAD(&vb->free_list);
742
743 vb_idx = addr_to_vb_idx(va->va_start);
744 spin_lock(&vmap_block_tree_lock);
745 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
746 spin_unlock(&vmap_block_tree_lock);
747 BUG_ON(err);
748 radix_tree_preload_end();
749
750 vbq = &get_cpu_var(vmap_block_queue);
751 vb->vbq = vbq;
752 spin_lock(&vbq->lock);
753 list_add(&vb->free_list, &vbq->free);
754 spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);
756
757 return vb;
758}
759
760static void rcu_free_vb(struct rcu_head *head)
761{
762 struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
763
764 kfree(vb);
765}
766
767static void free_vmap_block(struct vmap_block *vb)
768{
769 struct vmap_block *tmp;
770 unsigned long vb_idx;
771
772 BUG_ON(!list_empty(&vb->free_list));
773
774 vb_idx = addr_to_vb_idx(vb->va->va_start);
775 spin_lock(&vmap_block_tree_lock);
776 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
777 spin_unlock(&vmap_block_tree_lock);
778 BUG_ON(tmp != vb);
779
780 free_unmap_vmap_area_noflush(vb->va);
781 call_rcu(&vb->rcu_head, rcu_free_vb);
782}
783
784static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
785{
786 struct vmap_block_queue *vbq;
787 struct vmap_block *vb;
788 unsigned long addr = 0;
789 unsigned int order;
790
791 BUG_ON(size & ~PAGE_MASK);
792 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
793 order = get_order(size);
794
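	/*
	 * Scan this CPU's list of partially free vmap blocks for one with
	 * enough contiguous space; if none has room, allocate a fresh block
	 * and retry.
	 */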
795again:
796 rcu_read_lock();
797 vbq = &get_cpu_var(vmap_block_queue);
798 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
799 int i;
800
801 spin_lock(&vb->lock);
802 i = bitmap_find_free_region(vb->alloc_map,
803 VMAP_BBMAP_BITS, order);
804
805 if (i >= 0) {
806 addr = vb->va->va_start + (i << PAGE_SHIFT);
807 BUG_ON(addr_to_vb_idx(addr) !=
808 addr_to_vb_idx(vb->va->va_start));
809 vb->free -= 1UL << order;
810 if (vb->free == 0) {
811 spin_lock(&vbq->lock);
812 list_del_init(&vb->free_list);
813 spin_unlock(&vbq->lock);
814 }
815 spin_unlock(&vb->lock);
816 break;
817 }
818 spin_unlock(&vb->lock);
819 }
	put_cpu_var(vmap_block_queue);
821 rcu_read_unlock();
822
823 if (!addr) {
824 vb = new_vmap_block(gfp_mask);
825 if (IS_ERR(vb))
826 return vb;
827 goto again;
828 }
829
830 return (void *)addr;
831}
832
833static void vb_free(const void *addr, unsigned long size)
834{
835 unsigned long offset;
836 unsigned long vb_idx;
837 unsigned int order;
838 struct vmap_block *vb;
839
840 BUG_ON(size & ~PAGE_MASK);
841 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
842
843 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
844
845 order = get_order(size);
846
847 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
848
849 vb_idx = addr_to_vb_idx((unsigned long)addr);
850 rcu_read_lock();
851 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
852 rcu_read_unlock();
853 BUG_ON(!vb);
854
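	/*
	 * Mark the freed region dirty; once every bit of the block is dirty
	 * the whole block can be unmapped and released.
	 */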
855 spin_lock(&vb->lock);
856 bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
857
858 vb->dirty += 1UL << order;
859 if (vb->dirty == VMAP_BBMAP_BITS) {
860 BUG_ON(vb->free || !list_empty(&vb->free_list));
861 spin_unlock(&vb->lock);
862 free_vmap_block(vb);
863 } else
864 spin_unlock(&vb->lock);
865}
866
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
880void vm_unmap_aliases(void)
881{
882 unsigned long start = ULONG_MAX, end = 0;
883 int cpu;
884 int flush = 0;
885
886 if (unlikely(!vmap_initialized))
887 return;
888
889 for_each_possible_cpu(cpu) {
890 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
891 struct vmap_block *vb;
892
893 rcu_read_lock();
894 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
895 int i;
896
897 spin_lock(&vb->lock);
898 i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
899 while (i < VMAP_BBMAP_BITS) {
900 unsigned long s, e;
901 int j;
902 j = find_next_zero_bit(vb->dirty_map,
903 VMAP_BBMAP_BITS, i);
904
905 s = vb->va->va_start + (i << PAGE_SHIFT);
906 e = vb->va->va_start + (j << PAGE_SHIFT);
907 vunmap_page_range(s, e);
908 flush = 1;
909
910 if (s < start)
911 start = s;
912 if (e > end)
913 end = e;
914
915 i = j;
916 i = find_next_bit(vb->dirty_map,
917 VMAP_BBMAP_BITS, i);
918 }
919 spin_unlock(&vb->lock);
920 }
921 rcu_read_unlock();
922 }
923
924 __purge_vmap_area_lazy(&start, &end, 1, flush);
925}
926EXPORT_SYMBOL_GPL(vm_unmap_aliases);
927
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
933void vm_unmap_ram(const void *mem, unsigned int count)
934{
935 unsigned long size = count << PAGE_SHIFT;
936 unsigned long addr = (unsigned long)mem;
937
938 BUG_ON(!addr);
939 BUG_ON(addr < VMALLOC_START);
940 BUG_ON(addr > VMALLOC_END);
941 BUG_ON(addr & (PAGE_SIZE-1));
942
943 debug_check_no_locks_freed(mem, size);
944 vmap_debug_free_range(addr, addr+size);
945
946 if (likely(count <= VMAP_MAX_ALLOC))
947 vb_free(mem, size);
948 else
949 free_unmap_vmap_area_addr(addr);
950}
951EXPORT_SYMBOL(vm_unmap_ram);
952
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
962void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
963{
964 unsigned long size = count << PAGE_SHIFT;
965 unsigned long addr;
966 void *mem;
967
968 if (likely(count <= VMAP_MAX_ALLOC)) {
969 mem = vb_alloc(size, GFP_KERNEL);
970 if (IS_ERR(mem))
971 return NULL;
972 addr = (unsigned long)mem;
973 } else {
974 struct vmap_area *va;
975 va = alloc_vmap_area(size, PAGE_SIZE,
976 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
977 if (IS_ERR(va))
978 return NULL;
979
980 addr = va->va_start;
981 mem = (void *)addr;
982 }
983 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
984 vm_unmap_ram(mem, count);
985 return NULL;
986 }
987 return mem;
988}
989EXPORT_SYMBOL(vm_map_ram);
990
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU ARE NOWHERE IN THE BOOT PROCESS.
 */
1003void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1004{
1005 static size_t vm_init_off __initdata;
1006 unsigned long addr;
1007
1008 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1009 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1010
1011 vm->addr = (void *)addr;
1012
1013 vm->next = vmlist;
1014 vmlist = vm;
1015}
1016
1017void __init vmalloc_init(void)
1018{
1019 struct vmap_area *va;
1020 struct vm_struct *tmp;
1021 int i;
1022
1023 for_each_possible_cpu(i) {
1024 struct vmap_block_queue *vbq;
1025
1026 vbq = &per_cpu(vmap_block_queue, i);
1027 spin_lock_init(&vbq->lock);
1028 INIT_LIST_HEAD(&vbq->free);
1029 INIT_LIST_HEAD(&vbq->dirty);
1030 vbq->nr_dirty = 0;
1031 }
1032
	/* Import existing vmlist entries */
1034 for (tmp = vmlist; tmp; tmp = tmp->next) {
1035 va = alloc_bootmem(sizeof(struct vmap_area));
1036 va->flags = tmp->flags | VM_VM_AREA;
1037 va->va_start = (unsigned long)tmp->addr;
1038 va->va_end = va->va_start + tmp->size;
1039 __insert_vmap_area(va);
1040 }
1041 vmap_initialized = true;
1042}
1043
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
1063int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1064 pgprot_t prot, struct page **pages)
1065{
1066 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1067}
1068
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
1083void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1084{
1085 vunmap_page_range(addr, addr + size);
1086}
1087
/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * and tlb after.
 */
1096void unmap_kernel_range(unsigned long addr, unsigned long size)
1097{
1098 unsigned long end = addr + size;
1099
1100 flush_cache_vunmap(addr, end);
1101 vunmap_page_range(addr, end);
1102 flush_tlb_kernel_range(addr, end);
1103}
1104
1105int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1106{
1107 unsigned long addr = (unsigned long)area->addr;
1108 unsigned long end = addr + area->size - PAGE_SIZE;
1109 int err;
1110
1111 err = vmap_page_range(addr, end, prot, *pages);
1112 if (err > 0) {
1113 *pages += err;
1114 err = 0;
1115 }
1116
1117 return err;
1118}
1119EXPORT_SYMBOL_GPL(map_vm_area);
1120
/*** Old vmalloc interfaces ***/
1122DEFINE_RWLOCK(vmlist_lock);
1123struct vm_struct *vmlist;
1124
1125static struct vm_struct *__get_vm_area_node(unsigned long size,
1126 unsigned long flags, unsigned long start, unsigned long end,
1127 int node, gfp_t gfp_mask, void *caller)
1128{
	struct vmap_area *va;
1130 struct vm_struct *area;
1131 struct vm_struct *tmp, **p;
1132 unsigned long align = 1;
1133
1134 BUG_ON(in_interrupt());
1135 if (flags & VM_IOREMAP) {
1136 int bit = fls(size);
1137
1138 if (bit > IOREMAP_MAX_ORDER)
1139 bit = IOREMAP_MAX_ORDER;
1140 else if (bit < PAGE_SHIFT)
1141 bit = PAGE_SHIFT;
1142
1143 align = 1ul << bit;
1144 }
1145
1146 size = PAGE_ALIGN(size);
1147 if (unlikely(!size))
1148 return NULL;
1149
1150 area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1151 if (unlikely(!area))
1152 return NULL;
1153
	/*
	 * We always allocate a guard page.
	 */
1157 size += PAGE_SIZE;
1158
1159 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1160 if (IS_ERR(va)) {
1161 kfree(area);
1162 return NULL;
1163 }
1164
1165 area->flags = flags;
1166 area->addr = (void *)va->va_start;
1167 area->size = size;
1168 area->pages = NULL;
1169 area->nr_pages = 0;
1170 area->phys_addr = 0;
1171 area->caller = caller;
1172 va->private = area;
1173 va->flags |= VM_VM_AREA;
1174
1175 write_lock(&vmlist_lock);
1176 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1177 if (tmp->addr >= area->addr)
1178 break;
1179 }
1180 area->next = *p;
1181 *p = area;
1182 write_unlock(&vmlist_lock);
1183
1184 return area;
1185}
1186
1187struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1188 unsigned long start, unsigned long end)
1189{
1190 return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
1191 __builtin_return_address(0));
1192}
1193EXPORT_SYMBOL_GPL(__get_vm_area);
1194
1195struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1196 unsigned long start, unsigned long end,
1197 void *caller)
1198{
1199 return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
1200 caller);
1201}
1202
/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
1212struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1213{
1214 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
1215 -1, GFP_KERNEL, __builtin_return_address(0));
1216}
1217
1218struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1219 void *caller)
1220{
1221 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
1222 -1, GFP_KERNEL, caller);
1223}
1224
1225struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
1226 int node, gfp_t gfp_mask)
1227{
1228 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
1229 gfp_mask, __builtin_return_address(0));
1230}
1231
1232static struct vm_struct *find_vm_area(const void *addr)
1233{
1234 struct vmap_area *va;
1235
1236 va = find_vmap_area((unsigned long)addr);
1237 if (va && va->flags & VM_VM_AREA)
1238 return va->private;
1239
1240 return NULL;
1241}
1242
/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
1251struct vm_struct *remove_vm_area(const void *addr)
1252{
1253 struct vmap_area *va;
1254
1255 va = find_vmap_area((unsigned long)addr);
1256 if (va && va->flags & VM_VM_AREA) {
1257 struct vm_struct *vm = va->private;
1258 struct vm_struct *tmp, **p;
1259
1260 vmap_debug_free_range(va->va_start, va->va_end);
1261 free_unmap_vmap_area(va);
1262 vm->size -= PAGE_SIZE;
1263
1264 write_lock(&vmlist_lock);
1265 for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
1266 ;
1267 *p = tmp->next;
1268 write_unlock(&vmlist_lock);
1269
1270 return vm;
1271 }
1272 return NULL;
1273}
1274
1275static void __vunmap(const void *addr, int deallocate_pages)
1276{
1277 struct vm_struct *area;
1278
1279 if (!addr)
1280 return;
1281
1282 if ((PAGE_SIZE-1) & (unsigned long)addr) {
1283 WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
1284 return;
1285 }
1286
1287 area = remove_vm_area(addr);
1288 if (unlikely(!area)) {
1289 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1290 addr);
1291 return;
1292 }
1293
1294 debug_check_no_locks_freed(addr, area->size);
1295 debug_check_no_obj_freed(addr, area->size);
1296
1297 if (deallocate_pages) {
1298 int i;
1299
1300 for (i = 0; i < area->nr_pages; i++) {
1301 struct page *page = area->pages[i];
1302
1303 BUG_ON(!page);
1304 __free_page(page);
1305 }
1306
1307 if (area->flags & VM_VPAGES)
1308 vfree(area->pages);
1309 else
1310 kfree(area->pages);
1311 }
1312
1313 kfree(area);
1314 return;
1315}
1316
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually continuous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
1327void vfree(const void *addr)
1328{
1329 BUG_ON(in_interrupt());
1330 __vunmap(addr, 1);
1331}
1332EXPORT_SYMBOL(vfree);
1333
/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
1343void vunmap(const void *addr)
1344{
1345 BUG_ON(in_interrupt());
1346 might_sleep();
1347 __vunmap(addr, 0);
1348}
1349EXPORT_SYMBOL(vunmap);
1350
/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
1361void *vmap(struct page **pages, unsigned int count,
1362 unsigned long flags, pgprot_t prot)
1363{
1364 struct vm_struct *area;
1365
1366 might_sleep();
1367
1368 if (count > num_physpages)
1369 return NULL;
1370
1371 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1372 __builtin_return_address(0));
1373 if (!area)
1374 return NULL;
1375
1376 if (map_vm_area(area, prot, &pages)) {
1377 vunmap(area->addr);
1378 return NULL;
1379 }
1380
1381 return area->addr;
1382}
1383EXPORT_SYMBOL(vmap);
1384
1385static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1386 int node, void *caller);
1387static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1388 pgprot_t prot, int node, void *caller)
1389{
1390 struct page **pages;
1391 unsigned int nr_pages, array_size, i;
1392
1393 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1394 array_size = (nr_pages * sizeof(struct page *));
1395
1396 area->nr_pages = nr_pages;
1397
1398 if (array_size > PAGE_SIZE) {
1399 pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
1400 PAGE_KERNEL, node, caller);
1401 area->flags |= VM_VPAGES;
1402 } else {
1403 pages = kmalloc_node(array_size,
1404 (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
1405 node);
1406 }
1407 area->pages = pages;
1408 area->caller = caller;
1409 if (!area->pages) {
1410 remove_vm_area(area->addr);
1411 kfree(area);
1412 return NULL;
1413 }
1414
1415 for (i = 0; i < area->nr_pages; i++) {
1416 struct page *page;
1417
1418 if (node < 0)
1419 page = alloc_page(gfp_mask);
1420 else
1421 page = alloc_pages_node(node, gfp_mask, 0);
1422
1423 if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
1425 area->nr_pages = i;
1426 goto fail;
1427 }
1428 area->pages[i] = page;
1429 }
1430
1431 if (map_vm_area(area, prot, &pages))
1432 goto fail;
1433 return area->addr;
1434
1435fail:
1436 vfree(area->addr);
1437 return NULL;
1438}
1439
1440void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
1441{
1442 return __vmalloc_area_node(area, gfp_mask, prot, -1,
1443 __builtin_return_address(0));
1444}
1445
/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
1458static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1459 int node, void *caller)
1460{
1461 struct vm_struct *area;
1462
1463 size = PAGE_ALIGN(size);
1464 if (!size || (size >> PAGE_SHIFT) > num_physpages)
1465 return NULL;
1466
1467 area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
1468 node, gfp_mask, caller);
1469
1470 if (!area)
1471 return NULL;
1472
1473 return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1474}
1475
1476void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1477{
1478 return __vmalloc_node(size, gfp_mask, prot, -1,
1479 __builtin_return_address(0));
1480}
1481EXPORT_SYMBOL(__vmalloc);
1482
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
1492void *vmalloc(unsigned long size)
1493{
1494 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1495 -1, __builtin_return_address(0));
1496}
1497EXPORT_SYMBOL(vmalloc);
1498
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
1506void *vmalloc_user(unsigned long size)
1507{
1508 struct vm_struct *area;
1509 void *ret;
1510
1511 ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1512 PAGE_KERNEL, -1, __builtin_return_address(0));
1513 if (ret) {
1514 area = find_vm_area(ret);
1515 area->flags |= VM_USERMAP;
1516 }
1517 return ret;
1518}
1519EXPORT_SYMBOL(vmalloc_user);
1520
/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
1532void *vmalloc_node(unsigned long size, int node)
1533{
1534 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1535 node, __builtin_return_address(0));
1536}
1537EXPORT_SYMBOL(vmalloc_node);
1538
1539#ifndef PAGE_KERNEL_EXEC
1540# define PAGE_KERNEL_EXEC PAGE_KERNEL
1541#endif
1542

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
1555void *vmalloc_exec(unsigned long size)
1556{
1557 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1558 -1, __builtin_return_address(0));
1559}
1560
1561#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1562#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1563#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1564#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1565#else
1566#define GFP_VMALLOC32 GFP_KERNEL
1567#endif
1568
/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
1576void *vmalloc_32(unsigned long size)
1577{
1578 return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
1579 -1, __builtin_return_address(0));
1580}
1581EXPORT_SYMBOL(vmalloc_32);
1582
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
1590void *vmalloc_32_user(unsigned long size)
1591{
1592 struct vm_struct *area;
1593 void *ret;
1594
1595 ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1596 -1, __builtin_return_address(0));
1597 if (ret) {
1598 area = find_vm_area(ret);
1599 area->flags |= VM_USERMAP;
1600 }
1601 return ret;
1602}
1603EXPORT_SYMBOL(vmalloc_32_user);
1604
1605long vread(char *buf, char *addr, unsigned long count)
1606{
1607 struct vm_struct *tmp;
1608 char *vaddr, *buf_start = buf;
1609 unsigned long n;
1610
	/* Don't allow overflow */
1612 if ((unsigned long) addr + count < count)
1613 count = -(unsigned long) addr;
1614
1615 read_lock(&vmlist_lock);
1616 for (tmp = vmlist; tmp; tmp = tmp->next) {
1617 vaddr = (char *) tmp->addr;
1618 if (addr >= vaddr + tmp->size - PAGE_SIZE)
1619 continue;
1620 while (addr < vaddr) {
1621 if (count == 0)
1622 goto finished;
1623 *buf = '\0';
1624 buf++;
1625 addr++;
1626 count--;
1627 }
1628 n = vaddr + tmp->size - PAGE_SIZE - addr;
1629 do {
1630 if (count == 0)
1631 goto finished;
1632 *buf = *addr;
1633 buf++;
1634 addr++;
1635 count--;
1636 } while (--n > 0);
1637 }
1638finished:
1639 read_unlock(&vmlist_lock);
1640 return buf - buf_start;
1641}
1642
1643long vwrite(char *buf, char *addr, unsigned long count)
1644{
1645 struct vm_struct *tmp;
1646 char *vaddr, *buf_start = buf;
1647 unsigned long n;
1648
	/* Don't allow overflow */
1650 if ((unsigned long) addr + count < count)
1651 count = -(unsigned long) addr;
1652
1653 read_lock(&vmlist_lock);
1654 for (tmp = vmlist; tmp; tmp = tmp->next) {
1655 vaddr = (char *) tmp->addr;
1656 if (addr >= vaddr + tmp->size - PAGE_SIZE)
1657 continue;
1658 while (addr < vaddr) {
1659 if (count == 0)
1660 goto finished;
1661 buf++;
1662 addr++;
1663 count--;
1664 }
1665 n = vaddr + tmp->size - PAGE_SIZE - addr;
1666 do {
1667 if (count == 0)
1668 goto finished;
1669 *addr = *buf;
1670 buf++;
1671 addr++;
1672 count--;
1673 } while (--n > 0);
1674 }
1675finished:
1676 read_unlock(&vmlist_lock);
1677 return buf - buf_start;
1678}
1679
/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
1694int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1695 unsigned long pgoff)
1696{
1697 struct vm_struct *area;
1698 unsigned long uaddr = vma->vm_start;
1699 unsigned long usize = vma->vm_end - vma->vm_start;
1700
1701 if ((PAGE_SIZE-1) & (unsigned long)addr)
1702 return -EINVAL;
1703
1704 area = find_vm_area(addr);
1705 if (!area)
1706 return -EINVAL;
1707
1708 if (!(area->flags & VM_USERMAP))
1709 return -EINVAL;
1710
1711 if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
1712 return -EINVAL;
1713
1714 addr += pgoff << PAGE_SHIFT;
1715 do {
1716 struct page *page = vmalloc_to_page(addr);
1717 int ret;
1718
1719 ret = vm_insert_page(vma, uaddr, page);
1720 if (ret)
1721 return ret;
1722
1723 uaddr += PAGE_SIZE;
1724 addr += PAGE_SIZE;
1725 usize -= PAGE_SIZE;
1726 } while (usize > 0);
1727
	/* VM_RESERVED keeps the core VM from touching (e.g. swapping) these pages */
1729 vma->vm_flags |= VM_RESERVED;
1730
1731 return 0;
1732}
1733EXPORT_SYMBOL(remap_vmalloc_range);
1734
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
1739void __attribute__((weak)) vmalloc_sync_all(void)
1740{
1741}
1742
1743
1744static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
1745{
	/* apply_to_page_range() does all the hard work. */
1747 return 0;
1748}
1749
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
1762struct vm_struct *alloc_vm_area(size_t size)
1763{
1764 struct vm_struct *area;
1765
1766 area = get_vm_area_caller(size, VM_IOREMAP,
1767 __builtin_return_address(0));
1768 if (area == NULL)
1769 return NULL;
1770
	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
1775 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
1776 area->size, f, NULL)) {
1777 free_vm_area(area);
1778 return NULL;
1779 }
1780
	/* Make sure the pagetables are constructed in process kernel
	   mappings */
1783 vmalloc_sync_all();
1784
1785 return area;
1786}
1787EXPORT_SYMBOL_GPL(alloc_vm_area);
1788
1789void free_vm_area(struct vm_struct *area)
1790{
1791 struct vm_struct *ret;
1792 ret = remove_vm_area(area->addr);
1793 BUG_ON(ret != area);
1794 kfree(area);
1795}
1796EXPORT_SYMBOL_GPL(free_vm_area);
1797
1798
1799#ifdef CONFIG_PROC_FS
1800static void *s_start(struct seq_file *m, loff_t *pos)
1801{
1802 loff_t n = *pos;
1803 struct vm_struct *v;
1804
1805 read_lock(&vmlist_lock);
1806 v = vmlist;
1807 while (n > 0 && v) {
1808 n--;
1809 v = v->next;
1810 }
1811 if (!n)
1812 return v;
1813
1814 return NULL;
1815
1816}
1817
1818static void *s_next(struct seq_file *m, void *p, loff_t *pos)
1819{
1820 struct vm_struct *v = p;
1821
1822 ++*pos;
1823 return v->next;
1824}
1825
1826static void s_stop(struct seq_file *m, void *p)
1827{
1828 read_unlock(&vmlist_lock);
1829}
1830
1831static void show_numa_info(struct seq_file *m, struct vm_struct *v)
1832{
1833 if (NUMA_BUILD) {
1834 unsigned int nr, *counters = m->private;
1835
1836 if (!counters)
1837 return;
1838
1839 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
1840
1841 for (nr = 0; nr < v->nr_pages; nr++)
1842 counters[page_to_nid(v->pages[nr])]++;
1843
1844 for_each_node_state(nr, N_HIGH_MEMORY)
1845 if (counters[nr])
1846 seq_printf(m, " N%u=%u", nr, counters[nr]);
1847 }
1848}
1849
1850static int s_show(struct seq_file *m, void *p)
1851{
1852 struct vm_struct *v = p;
1853
1854 seq_printf(m, "0x%p-0x%p %7ld",
1855 v->addr, v->addr + v->size, v->size);
1856
1857 if (v->caller) {
1858 char buff[KSYM_SYMBOL_LEN];
1859
1860 seq_putc(m, ' ');
1861 sprint_symbol(buff, (unsigned long)v->caller);
1862 seq_puts(m, buff);
1863 }
1864
1865 if (v->nr_pages)
1866 seq_printf(m, " pages=%d", v->nr_pages);
1867
1868 if (v->phys_addr)
1869 seq_printf(m, " phys=%lx", v->phys_addr);
1870
1871 if (v->flags & VM_IOREMAP)
1872 seq_printf(m, " ioremap");
1873
1874 if (v->flags & VM_ALLOC)
1875 seq_printf(m, " vmalloc");
1876
1877 if (v->flags & VM_MAP)
1878 seq_printf(m, " vmap");
1879
1880 if (v->flags & VM_USERMAP)
1881 seq_printf(m, " user");
1882
1883 if (v->flags & VM_VPAGES)
1884 seq_printf(m, " vpages");
1885
1886 show_numa_info(m, v);
1887 seq_putc(m, '\n');
1888 return 0;
1889}
1890
1891static const struct seq_operations vmalloc_op = {
1892 .start = s_start,
1893 .next = s_next,
1894 .stop = s_stop,
1895 .show = s_show,
1896};
1897
1898static int vmalloc_open(struct inode *inode, struct file *file)
1899{
1900 unsigned int *ptr = NULL;
1901 int ret;
1902
1903 if (NUMA_BUILD)
1904 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
1905 ret = seq_open(file, &vmalloc_op);
1906 if (!ret) {
1907 struct seq_file *m = file->private_data;
1908 m->private = ptr;
1909 } else
1910 kfree(ptr);
1911 return ret;
1912}
1913
1914static const struct file_operations proc_vmalloc_operations = {
1915 .open = vmalloc_open,
1916 .read = seq_read,
1917 .llseek = seq_lseek,
1918 .release = seq_release_private,
1919};
1920
1921static int __init proc_vmalloc_init(void)
1922{
1923 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
1924 return 0;
1925}
1926module_init(proc_vmalloc_init);
1927#endif
1928
1929