linux/include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
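
/*
 * Illustrative sketch, not part of the original header: when the size is a
 * compile-time constant, the inline kmalloc() defined below folds the cache
 * lookup away, so a call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * becomes a direct kmem_cache_alloc() on the first general cache in
 * malloc_sizes[] whose cs_size is >= 64, instead of going through the
 * run-time size lookup in __kmalloc().
 */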

#include <linux/init.h>
#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
        u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. size contains the total
         * object size including these internal fields, the following two
         * variables contain the offset to the user object and its size.
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
        struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
        /*
         * We put array[] at the end of kmem_cache, because we want to size
         * this array to nr_cpu_ids slots instead of NR_CPUS
         * (see kmem_cache_init())
         * We still use [NR_CPUS] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of cpus.
         *
         * We also need to guarantee that the list is able to accommodate a
         * pointer for each node since "nodelists" uses the remainder of
         * available pointers.
         */
        struct kmem_list3 **nodelists;
        struct array_cache *array[NR_CPUS + MAX_NUMNODES];
        /*
         * Do not add fields after array[]
         */
};
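
/*
 * Illustrative usage sketch (assumption, not part of this header): a cache
 * for a fixed-size object is typically set up and used like
 *
 *	cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				   SLAB_HWCACHE_ALIGN, NULL);
 *	objp = kmem_cache_alloc(cachep, GFP_KERNEL);
 *	kmem_cache_free(cachep, objp);
 *
 * which populates the name, object_size, align, flags and ctor fields above
 * ("foo_cache" and struct foo are made-up names for the example).
 */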

/* Size description struct for general caches. */
struct cache_sizes {
        size_t                  cs_size;
        struct kmem_cache       *cs_cachep;
#ifdef CONFIG_ZONE_DMA
        struct kmem_cache       *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
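
/*
 * Rough sketch of what the kmalloc_sizes.h expansion produces (an
 * assumption; the exact entries depend on PAGE_SIZE and L1_CACHE_BYTES):
 * malloc_sizes[] is an ascending table along the lines of
 *
 *	{ .cs_size = 32,   .cs_cachep = ... },
 *	{ .cs_size = 64,   .cs_cachep = ... },
 *	{ .cs_size = 128,  .cs_cachep = ... },
 *	...
 *
 * and the constant-size kmalloc()/kmalloc_node() below re-expand the same
 * CACHE(x) list to compute the matching index i at compile time.
 */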

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
        return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                ret = kmem_cache_alloc_trace(cachep, flags, size);

                return ret;
        }
        return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                                         gfp_t flags,
                                         int nodeid,
                                         size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                            gfp_t flags,
                            int nodeid,
                            size_t size)
{
        return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *cachep;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                return kmem_cache_alloc_node_trace(cachep, flags, node, size);
        }
        return __kmalloc_node(size, flags, node);
}

#endif  /* CONFIG_NUMA */

#endif  /* _LINUX_SLAB_DEF_H */