linux/include/linux/swap.h
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>

#include <linux/atomic.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* discard swap cluster after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD)

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the swapcache offset is limited to 27 bits on
 * 32-bit-pgoff_t architectures, and that assumes the architecture also
 * packs the type/offset into the pte as 5/27.
 */
#define MAX_SWAPFILES_SHIFT     5
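
/*
 * Illustrative sketch (hypothetical helpers, not part of this header):
 * one way a 5-bit type and a 27-bit offset can share a 32-bit value.
 * The real encoding is arch-specific; see swp_entry(), swp_type() and
 * swp_offset() in linux/swapops.h and the pte helpers in asm/pgtable.h.
 */
#if 0
static inline unsigned long example_swp_pack(unsigned int type, pgoff_t offset)
{
        /* type in the top 5 bits, offset in the low 27 */
        return ((unsigned long)type << 27) | (offset & ((1UL << 27) - 1));
}

static inline unsigned int example_swp_type(unsigned long packed)
{
        return packed >> 27;
}

static inline pgoff_t example_swp_offset(unsigned long packed)
{
        return packed & ((1UL << 27) - 1);
}
#endif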

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
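
/*
 * Illustrative arithmetic (a sketch, not compiled): with
 * MAX_SWAPFILES_SHIFT == 5 and both CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE enabled, MAX_SWAPFILES == 32 - 2 - 1 == 29,
 * leaving types 29..31 for SWP_HWPOISON, SWP_MIGRATION_READ and
 * SWP_MIGRATION_WRITE.  Inside a function one could assert this
 * (needs linux/bug.h):
 */
#if 0
BUILD_BUG_ON(MAX_SWAPFILES + SWP_MIGRATION_NUM + SWP_HWPOISON_NUM !=
             (1 << MAX_SWAPFILES_SHIFT));
#endif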

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};
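
/*
 * Illustrative sketch (hypothetical helper): roughly how swapon-time
 * code tells the two formats apart; mm/swapfile.c compares the last
 * 10 bytes of the page in much this way (memcmp needs linux/string.h).
 */
#if 0
static inline int example_swap_header_version(union swap_header *hdr)
{
        if (!memcmp(hdr->magic.magic, "SWAPSPACE2", 10))
                return 2;       /* new-style header, info fields valid */
        if (!memcmp(hdr->magic.magic, "SWAP-SPACE", 10))
                return 1;       /* old-style, limited to 128MB */
        return 0;               /* not a swap area */
}
#endif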

/*
 * A swap entry has to fit into an "unsigned long", as the entry
 * is hidden in the "index" field of the swapper address space.
 */
typedef struct {
        unsigned long val;
} swp_entry_t;

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct list_head list;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};
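
/*
 * Illustrative sketch (hypothetical and simplified): translating a
 * page offset within the swap area into a disk block by walking an
 * extent list.  The real lookup, map_swap_page() in mm/swapfile.c,
 * also caches the extent it last used.  Assumes @offset is covered
 * by the list.
 */
#if 0
static inline sector_t example_extent_lookup(struct list_head *extents,
                                             pgoff_t offset)
{
        struct swap_extent *se;

        list_for_each_entry(se, extents, list) {
                if (offset >= se->start_page &&
                    offset < se->start_page + se->nr_pages)
                        return se->start_block + (offset - se->start_page);
        }
        return 0;       /* not reached if the extents cover the area */
}
#endif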

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
        ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
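
/*
 * Illustrative arithmetic (assuming a 4096-byte page): info.badpages
 * starts at 1024 + 3*4 + 16 + 16 + 117*4 == 1536, the magic sits at
 * 4096 - 10 == 4086, so MAX_SWAP_BADPAGES == (4086 - 1536) / 4 == 637.
 */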

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_FILE        = (1 << 7),     /* set after swap_activate success */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 8),     /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that
 * we're allowing kswapd to shrink in addition to the per-zone high
 * wmark, even for zones that already have the high wmark satisfied,
 * in order to provide better per-zone lru behavior. We spend at most
 * 1% of the zone's memory on this balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
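
/*
 * Illustrative sketch: mm/vmscan.c derives the per-zone gap roughly
 * as below, i.e. about 1% of the zone's present pages, rounded up.
 */
#if 0
balance_gap = (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
              KSWAPD_ZONE_BALANCE_GAP_RATIO;
#endif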

#define SWAP_MAP_MAX    0x3e    /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD    0x3f    /* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX   0x7f    /* Max count, in each swap_map continuation */
#define COUNT_CONTINUED 0x80    /* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs, in first swap_map */
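
/*
 * Illustrative sketch (hypothetical helpers mirroring swap_count() in
 * mm/swapfile.c): bit 6 of a swap_map byte is the swapcache flag; the
 * rest is the duplication count, where COUNT_CONTINUED means the true
 * count carries on in a continuation page (see SWP_CONTINUED).
 */
#if 0
static inline unsigned char example_swap_count(unsigned char ent)
{
        return ent & ~SWAP_HAS_CACHE;   /* may still include COUNT_CONTINUED */
}

static inline bool example_swap_has_cache(unsigned char ent)
{
        return ent & SWAP_HAS_CACHE;
}
#endif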

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        signed char     type;           /* strange name for an index */
        signed char     next;           /* next type on the swap list */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        unsigned int lowest_alloc;      /* while preparing discard cluster */
        unsigned int highest_alloc;     /* while preparing discard cluster */
        struct swap_extent *curr_swap_extent;
        struct swap_extent first_swap_extent;
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
};

struct swap_list_t {
        int head;       /* head of priority-ordered swapfile list */
        int next;       /* swapfile to be used next */
};
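
/*
 * Illustrative sketch (simplified from get_swap_page() in
 * mm/swapfile.c, where the swap_list and swap_info[] globals live):
 * allocation starts at swap_list.next and follows the per-entry
 * ->next links, which keep swap types sorted by descending priority.
 */
#if 0
int type;

for (type = swap_list.next; type >= 0; type = swap_info[type]->next) {
        if (swap_info[type]->flags & SWP_WRITEOK)
                break;  /* highest-priority type still accepting writes */
}
#endif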

/* Swap 50% full? Release swapcache more aggressively. */
#define vm_swap_full() (nr_swap_pages * 2 < total_swap_pages)
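
/*
 * Illustrative use (the pattern reclaim code in mm/vmscan.c follows):
 * once swap is half full, drop swapcache copies early to free slots.
 */
#if 0
if (PageSwapCache(page) && vm_swap_full())
        try_to_free_swap(page);
#endif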

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                              struct lruvec *lruvec);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

/**
 * lru_cache_add_anon - add a page to the inactive anon LRU list
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
        __lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
        __lru_cache_add(page, LRU_INACTIVE_FILE);
}

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
                                                  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                struct zone *zone,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
        return 0;
}
#endif

extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
        return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_space;
#define total_swapcache_pages  swapper_space.nrpages
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif

#else /* CONFIG_SWAP */

#define nr_swap_pages                           0L
#define total_swap_pages                        0L
#define total_swapcache_pages                   0UL

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* sparc is the only architecture that cannot include linux/pagemap.h
 * here, so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
        page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr), 0)

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)        is_migration_entry(swp)
#define swapcache_prepare(swp)          is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
        return NULL;
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                        gfp_t gfp_mask)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

#define reuse_swap_page(page)   (page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(void)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */