linux/fs/ext4/mballoc.h
<<
>>
Prefs
   1/*
   2 *  fs/ext4/mballoc.h
   3 *
   4 *  Written by: Alex Tomas <alex@clusterfs.com>
   5 *
   6 */
   7#ifndef _EXT4_MBALLOC_H
   8#define _EXT4_MBALLOC_H
   9
  10#include <linux/time.h>
  11#include <linux/fs.h>
  12#include <linux/namei.h>
  13#include <linux/quotaops.h>
  14#include <linux/buffer_head.h>
  15#include <linux/module.h>
  16#include <linux/swap.h>
  17#include <linux/proc_fs.h>
  18#include <linux/pagemap.h>
  19#include <linux/seq_file.h>
  20#include <linux/blkdev.h>
  21#include <linux/mutex.h>
  22#include "ext4_jbd2.h"
  23#include "ext4.h"
  24
/*
 * With AGGRESSIVE_CHECK defined, the allocator runs consistency checks
 * over its structures.  These checks slow things down a lot.  The
 * trailing "__" keeps the macro name from matching, i.e. the checks
 * are compiled out; drop the "__" to enable them.
 */
#define AGGRESSIVE_CHECK__

/*
 * With DOUBLE_CHECK defined, mballoc creates persistent in-core
 * bitmaps, and maintains and uses them to check for double
 * allocations.  Disabled the same way, via the trailing "__".
 */
#define DOUBLE_CHECK__

/*
 * Debug printing support.
 */
#ifdef CONFIG_EXT4_DEBUG
/* runtime verbosity knob: messages at level <= mb_enable_debug print */
extern u8 mb_enable_debug;

/*
 * mb_debug(n, fmt, ...): emit a KERN_DEBUG message at verbosity level
 * n, prefixed with the file, line and function it came from.  Expands
 * to nothing when CONFIG_EXT4_DEBUG is not set.
 */
#define mb_debug(n, fmt, a...)                                          \
        do {                                                            \
                if ((n) <= mb_enable_debug) {                           \
                        printk(KERN_DEBUG "(%s, %d): %s: ",             \
                               __FILE__, __LINE__, __func__);           \
                        printk(fmt, ## a);                              \
                }                                                       \
        } while (0)
#else
#define mb_debug(n, fmt, a...)
#endif
  53
/* operation tags recorded in ac_op, for allocation history */
#define EXT4_MB_HISTORY_ALLOC           1       /* allocation */
#define EXT4_MB_HISTORY_PREALLOC        2       /* preallocated blocks used */

/*
 * How long mballoc can look for a best extent (in found extents)
 */
#define MB_DEFAULT_MAX_TO_SCAN          200

/*
 * How long mballoc must look for a best extent
 */
#define MB_DEFAULT_MIN_TO_SCAN          10

/*
 * How many groups mballoc will scan looking for the best chunk
 */
#define MB_DEFAULT_MAX_GROUPS_TO_SCAN   5

/*
 * With 'ext4_mb_stats' enabled, the allocator collects stats that are
 * shown at umount.  The collecting has a cost, though!
 */
#define MB_DEFAULT_STATS                0

/*
 * Files smaller than MB_DEFAULT_STREAM_THRESHOLD are served by the
 * stream allocator, whose purpose is to pack requests as close to each
 * other as possible to produce smooth I/O traffic.  We use the
 * locality group prealloc space for stream requests.
 * The same can be tuned via /proc/fs/ext4/<partition>/stream_req.
 */
#define MB_DEFAULT_STREAM_THRESHOLD     16      /* 64K */

/*
 * Threshold order: requests of at least this order use the 2^N
 * search over the buddy bitmaps.
 */
#define MB_DEFAULT_ORDER2_REQS          2

/*
 * Default group preallocation size, in blocks (512)
 */
#define MB_DEFAULT_GROUP_PREALLOC       512
  96
  97
/*
 * One extent of blocks freed under a not-yet-committed transaction.
 * It is linked into the journal commit-callback machinery through
 * efd_jce and into the group's tree of freed extents through efd_node.
 */
struct ext4_free_data {
        /* MUST be the first member */
        struct ext4_journal_cb_entry    efd_jce;

        /* ext4_free_data private data starts from here */

        /* this links the free block information from group_info */
        struct rb_node                  efd_node;

        /* group which the free block extent belongs to */
        ext4_group_t                    efd_group;

        /* free block extent: first cluster and cluster count */
        ext4_grpblk_t                   efd_start_cluster;
        ext4_grpblk_t                   efd_count;

        /* transaction which freed this extent */
        tid_t                           efd_tid;
};
 117
/*
 * One preallocated space of blocks.  A pa is owned either by an inode
 * or by a locality group, as recorded in pa_type (MB_INODE_PA /
 * MB_GROUP_PA below).
 */
struct ext4_prealloc_space {
        struct list_head        pa_inode_list;  /* link in owner inode's list */
        struct list_head        pa_group_list;  /* link in the group's list */
        union {
                struct list_head pa_tmp_list;   /* transient scratch linkage */
                struct rcu_head pa_rcu;         /* for RCU-deferred freeing */
        } u;
        spinlock_t              pa_lock;
        atomic_t                pa_count;       /* reference count */
        unsigned                pa_deleted;     /* non-zero once pa is deleted */
        ext4_fsblk_t            pa_pstart;      /* phys. block */
        ext4_lblk_t             pa_lstart;      /* log. block */
        ext4_grpblk_t           pa_len;         /* len of preallocated chunk */
        ext4_grpblk_t           pa_free;        /* how many blocks are free */
        unsigned short          pa_type;        /* pa type. inode or group */
        spinlock_t              *pa_obj_lock;   /* lock of the owning object */
        struct inode            *pa_inode;      /* hack, for history only */
};
 136
/* preallocation ownership types, stored in ext4_prealloc_space.pa_type */
enum {
        MB_INODE_PA = 0,        /* pa belongs to an inode */
        MB_GROUP_PA = 1         /* pa belongs to a locality group */
};
 141
/*
 * An extent as the allocator sees it: a logical start block plus a
 * physical position given as (group, start-within-group, length),
 * the latter two in cluster units.
 */
struct ext4_free_extent {
        ext4_lblk_t fe_logical;         /* logical block of the extent */
        ext4_grpblk_t fe_start;         /* In cluster units */
        ext4_group_t fe_group;          /* block group holding the extent */
        ext4_grpblk_t fe_len;           /* In cluster units */
};
 148
 149/*
 150 * Locality group:
 151 *   we try to group all related changes together
 152 *   so that writeback can flush/allocate them together as well
 153 *   Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
 154 *   (512). We store prealloc space into the hash based on the pa_free blocks
 155 *   order value.ie, fls(pa_free)-1;
 156 */
#define PREALLOC_TB_SIZE 10
struct ext4_locality_group {
        /* for allocator */
        /* to serialize allocates */
        struct mutex            lg_mutex;
        /* lists of preallocations, hashed by fls(pa_free)-1 order */
        struct list_head        lg_prealloc_list[PREALLOC_TB_SIZE];
        /* protects the lg_prealloc_list hash */
        spinlock_t              lg_prealloc_lock;
};
 166
/*
 * State carried through a single multi-block allocation request: the
 * original, normalized (goal) and best-found extents, scan counters,
 * hint flags, and the pages/preallocation backing the search.
 */
struct ext4_allocation_context {
        struct inode *ac_inode;         /* inode the allocation is for */
        struct super_block *ac_sb;

        /* original request */
        struct ext4_free_extent ac_o_ex;

        /* goal request (normalized ac_o_ex) */
        struct ext4_free_extent ac_g_ex;

        /* the best found extent */
        struct ext4_free_extent ac_b_ex;

        /* copy of the best found extent taken before preallocation efforts */
        struct ext4_free_extent ac_f_ex;

        /* number of iterations done. we have to track to limit searching */
        unsigned long ac_ex_scanned;
        __u16 ac_groups_scanned;        /* groups examined so far */
        __u16 ac_found;                 /* extents found so far */
        __u16 ac_tail;
        __u16 ac_buddy;
        __u16 ac_flags;         /* allocation hints */
        __u8 ac_status;         /* AC_STATUS_* scan state, see below */
        __u8 ac_criteria;
        __u8 ac_2order;         /* if request is to allocate 2^N blocks and
                                 * N > 0, the field stores N, otherwise 0 */
        __u8 ac_op;             /* operation, for history only */
        struct page *ac_bitmap_page;    /* page holding the block bitmap */
        struct page *ac_buddy_page;     /* page holding the buddy data */
        struct ext4_prealloc_space *ac_pa;      /* preallocation used, if any */
        struct ext4_locality_group *ac_lg;      /* locality group, if used */
};
 200
/* scan-state values for ext4_allocation_context.ac_status */
#define AC_STATUS_CONTINUE      1       /* keep scanning */
#define AC_STATUS_FOUND         2       /* a suitable extent was found */
#define AC_STATUS_BREAK         3       /* stop scanning */
 204
/*
 * Handle onto one group's buddy data: the cache pages plus pointers
 * into the buddy bitmap and the block bitmap they back.
 */
struct ext4_buddy {
        struct page *bd_buddy_page;     /* page backing bd_buddy */
        void *bd_buddy;                 /* the buddy bitmap data */
        struct page *bd_bitmap_page;    /* page backing bd_bitmap */
        void *bd_bitmap;                /* the block bitmap data */
        struct ext4_group_info *bd_info;        /* in-core group info */
        struct super_block *bd_sb;
        __u16 bd_blkbits;               /* block size bits */
        ext4_group_t bd_group;          /* group this buddy describes */
};
 215
 216static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
 217                                        struct ext4_free_extent *fex)
 218{
 219        return ext4_group_first_block_no(sb, fex->fe_group) +
 220                (fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
 221}
 222#endif
 223
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.