#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>

struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

extern unsigned long mmap_min_addr;

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

extern struct kmem_cache *vm_area_cachep;

struct vm_list_struct {
	struct vm_list_struct	*next;
	struct vm_area_struct	*vma;
};

#ifndef CONFIG_MMU
extern struct rb_root nommu_vma_tree;
extern struct rw_semaphore nommu_vma_sem;

extern unsigned int kobjsize(const void *objp);
#endif
/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100
#define VM_GROWSUP	0x00000200
#define VM_PFNMAP	0x00000400
#define VM_DENYWRITE	0x00000800

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000

#define VM_SEQ_READ	0x00008000
#define VM_RAND_READ	0x00010000

#define VM_DONTCOPY	0x00020000
#define VM_DONTEXPAND	0x00040000
#define VM_RESERVED	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_HUGETLB	0x00400000
#define VM_NONLINEAR	0x00800000
#define VM_MAPPED_COPY	0x01000000
#define VM_INSERTPAGE	0x02000000
#define VM_ALWAYSDUMP	0x04000000

#define VM_CAN_NONLINEAR 0x08000000
#define VM_MIXEDMAP	0x10000000
#define VM_SAO		0x20000000

#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_NONLINEAR	0x02

/*
 * vm_fault is filled in by the pagefault handler and passed to the vma's
 * ->fault function, which should look it up and return the page (or an
 * appropriate VM_FAULT_xxx error).
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* logical page offset based on vma */
	void __user *virtual_address;	/* faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};
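/*
 * Editorial example (not part of the original header): a filesystem-backed
 * ->fault handler such as filemap_fault(), declared later in this file,
 * consumes vmf->pgoff to locate the pagecache page and reports back through
 * vmf->page plus its return value, roughly:
 *
 *	page = find_get_page(mapping, vmf->pgoff);	// or read it in
 *	...
 *	vmf->page = page;
 *	return ret | VM_FAULT_LOCKED;	// page is handed back locked
 */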

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointers
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() must take a reference on any non-NULL @new mempolicy
	 * to hold the policy upon return.  Callers pass NULL @new to remove
	 * a policy and fall back to the surrounding context.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() must add a reference [mpol_get()] to any policy at
	 * (vma, addr) marked as MPOL_SHARED; the shared policy infrastructure
	 * in mm/mempolicy.c does this automatically.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
};
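/*
 * Editorial sketch (not part of the original header): a minimal driver-style
 * vm_operations_struct wiring up only a ->fault handler.  my_fault and
 * my_vm_ops are hypothetical names.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page = alloc_page(GFP_KERNEL);
 *
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		vmf->page = page;	// reference handed to the fault core
 *		return 0;
 *	}
 *
 *	static struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */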

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#include <linux/page-flags.h>

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no
 * users).
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	return atomic_inc_not_zero(&page->_count);
}
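/*
 * Editorial usage note (not from the original header): lockless lookups take
 * a speculative reference with get_page_unless_zero() and then re-check that
 * the page is still the one they expected, dropping the reference on a
 * mismatch, e.g.:
 *
 *	if (!get_page_unless_zero(page))
 *		goto repeat;		// page is being freed, retry lookup
 *	if (unlikely(page != *pagep)) {	// raced with removal or reuse
 *		put_page(page);
 *		goto repeat;
 *	}
 */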

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range.
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a prototype for that
 * function and accessor functions.  These are _only_ valid on the head of a
 * PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
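/*
 * Editorial note (not from the original header): for a compound page the
 * destructor and order are parked in otherwise-unused fields of the *second*
 * page's struct page, so for instance a huge page set up with
 *
 *	set_compound_order(page, 9);
 *	set_compound_page_dtor(page, free_huge_page);
 *
 * can later be torn down generically via
 *
 *	(*get_compound_page_dtor(page))(page);
 *
 * free_huge_page is the hugetlb destructor in mm/hugetlb.c, named here only
 * as an illustration.
 */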

/*
 * The zone, node and (with classic sparsemem) section that a page belongs to
 * are encoded in the upper bits of page->flags; the *_WIDTH, *_PGOFF,
 * *_PGSHIFT and *_MASK macros below lay out and access those fields.
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * The node is kept in page->flags if it fits; otherwise it must be looked up
 * elsewhere (see page_to_nid() below).
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
#endif

/*
 * Define the bit shifts to access each field.  For non-existent fields the
 * shift is 0; that plus a 0 mask ensures the compiler optimises away any
 * reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(struct page *page);
#else
static inline int page_to_nid(struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}
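/*
 * Editorial round-trip example (not from the original header): memmap
 * initialisation stores a page's placement with set_page_links() and the
 * accessors above recover it by shifting and masking page->flags:
 *
 *	set_page_links(page, ZONE_NORMAL, nid, pfn);
 *	BUG_ON(page_zonenum(page) != ZONE_NORMAL);
 *	BUG_ON(page_to_nid(page) != nid);	// assumes the node id fits in
 *						// page->flags on this config
 *
 * The concrete field widths and shifts are the *_WIDTH / *_PGSHIFT values
 * computed above from the kernel configuration.
 */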

static inline unsigned long round_hint_to_min(unsigned long hint)
{
#ifdef CONFIG_SECURITY
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
#endif
	return hint;
}

#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space,
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 */
#define PAGE_MAPPING_ANON	1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
#ifdef CONFIG_SWAP
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else
#endif
	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private.
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

/*
 * The atomic page->_mapcount, like _count, starts from -1: so that
 * transitions both from it and to it can be tracked.
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Possible return values of ->fault / handle_mm_fault(): a bitmask, not an
 * enumeration.
 */
#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
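/*
 * Editorial usage note (not from the original header): callers test the
 * returned bitmask rather than comparing for equality, e.g.:
 *
 *	ret = vma->vm_ops->fault(vma, &vmf);
 *	if (unlikely(ret & VM_FAULT_ERROR))
 *		return ret;		// VM_FAULT_OOM or VM_FAULT_SIGBUS
 *	if (ret & VM_FAULT_MAJOR)
 *		count_vm_event(PGMAJFAULT);
 *
 * count_vm_event() comes from <linux/vmstat.h>, included above.
 */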

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

#ifdef CONFIG_SHMEM
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
static inline int shmem_lock(struct file *file, int lock,
			     struct user_struct *user)
{
	return 0;
}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);

int shmem_zero_setup(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern unsigned long shmem_get_unmapped_area(struct file *file,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags);
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};
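/*
 * Editorial sketch (not from the original header): a typical user fills in
 * only the callbacks it needs and then walks a range of an mm.  my_pte_entry
 * is a hypothetical name.
 *
 *	static int my_pte_entry(pte_t *pte, unsigned long addr,
 *				unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)walk->private)++;
 *		return 0;		// non-zero would abort the walk
 *	}
 *
 *	struct mm_walk walk = { .pte_entry = my_pte_entry,
 *				.mm = mm, .private = &count };
 *	walk_page_range(start, end, &walk);
 */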

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			int write_access)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * 'shrink' is passed a count 'nr_to_scan' and a 'gfp_mask'.  It should scan
 * up to 'nr_to_scan' least-recently-used entries and attempt to free them,
 * returning the number of objects remaining in the cache, or -1 if it cannot
 * scan at this time (e.g. risk of deadlock).  'shrink' is also called with
 * nr_to_scan == 0 when the VM is merely querying the cache size.
 */
struct shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;	/* seeks to recreate an obj */

	/* These are for internal use */
	struct list_head list;
	long nr;	/* objs pending delete */
};
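/*
 * Editorial sketch of registering a shrinker (not from the original header);
 * my_cache_shrink, my_shrinker and the pruning helpers are hypothetical.
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			prune_my_cache(nr_to_scan, gfp_mask);	// hypothetical
 *		return my_cache_object_count();			// hypothetical
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&my_shrinker);	// at init
 *	unregister_shrinker(&my_shrinker);	// at exit
 */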
#define DEFAULT_SEEKS 2
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * With split page table locks, each page table page carries its own spinlock
 * in its struct page.  When freeing, page->mapping is reset so the page
 * allocator's sanity checks stay quiet.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
/*
 * Without split locks, mm->page_table_lock guards all page table pages of
 * the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
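/*
 * Editorial usage note (not from the original header): the map/lock and
 * unmap/unlock macros are used as a bracketed pair around PTE manipulation:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte))
 *		... examine or modify the pte ...
 *	pte_unmap_unlock(pte, ptl);
 *
 * With split PTE locks the lock protects just this page table page; otherwise
 * it is mm->page_table_lock.
 */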

#define pte_alloc_map(mm, pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture registers its
 * ranges of physical memory with add_active_range() and then lets
 * free_area_init_nodes() initialise the zones, allocate the mem_map and
 * account for memory holes in an architecture-independent manner.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_all_active_ranges(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_pages_min(void);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
#else
static inline void setup_per_cpu_pageset(void) {}
#endif

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )
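/*
 * Editorial usage note (not from the original header): vma_prio_tree_foreach
 * iterates over every vma in a mapping's prio tree that overlaps a given
 * file-page range, as the reverse-map and truncate paths do:
 *
 *	struct vm_area_struct *vma;
 *	struct prio_tree_iter iter;
 *
 *	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap,
 *			      pgoff_start, pgoff_end) {
 *		... vma maps at least one page in [pgoff_start, pgoff_end] ...
 *	}
 */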

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

#ifdef CONFIG_PROC_FS
extern void added_exe_file_vma(struct mm_struct *mm);
extern void removed_exe_file_vma(struct mm_struct *mm);
#else
static inline void added_exe_file_vma(struct mm_struct *mm)
{}

static inline void removed_exe_file_vma(struct mm_struct *mm)
{}
#endif /* CONFIG_PROC_FS */

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	unsigned int vm_flags, unsigned long pgoff,
	int accountable);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif
extern int expand_stack_downwards(struct vm_area_struct *vma,
				  unsigned long address);

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/*
 * Look up the first VMA which intersects the interval start_addr..end_addr-1,
 * NULL if none.  Assume start_addr < end_addr.
 */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
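/*
 * Editorial usage note (not from the original header): callers hold
 * mm->mmap_sem around these lookups, and must check that the returned vma
 * actually covers the address, since find_vma() returns the first vma that
 * merely ends above it:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		... addr really lies inside vma ...
 *	up_read(&mm->mmap_sem);
 */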

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

pgprot_t vm_get_page_prot(unsigned long vm_flags);
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);

struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
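/*
 * Editorial sketch (not from the original header): apply_to_page_range()
 * allocates page tables as needed for [address, address + size) and calls fn
 * on every pte in that range.  touch_pte is a hypothetical callback:
 *
 *	static int touch_pte(pte_t *pte, pgtable_t token,
 *			     unsigned long addr, void *data)
 *	{
 *		(*(unsigned long *)data)++;	// e.g. count ptes visited
 *		return 0;			// non-zero aborts the walk
 *	}
 *
 *	unsigned long n = 0;
 *	apply_to_page_range(mm, start, size, touch_pte, &n);
 */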

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern int debug_pagealloc_enabled;

extern void kernel_map_pages(struct page *page, int numpages, int enable);

static inline void enable_debug_pagealloc(void)
{
	debug_pagealloc_enabled = 1;
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline void enable_debug_pagealloc(void)
{
}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
						unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */