linux/mm/slab.h
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
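
/*
 * Editor's illustrative sketch (not part of the original header): early
 * boot code can gate allocations on slab_state, falling back to the boot
 * allocator until the slab is up. alloc_bootmem() here stands in for
 * whatever early allocator the caller has available:
 *
 *	void *p;
 *
 *	if (slab_state >= UP)
 *		p = kmalloc(len, GFP_NOWAIT);
 *	else
 *		p = alloc_bootmem(len);
 */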

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);
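
/*
 * Worked example (editor's sketch of the logic in mm/slab_common.c,
 * assuming 64-byte cache lines): with SLAB_HWCACHE_ALIGN the line size is
 * halved while the object still fits in half of it, so several small
 * objects can share one cache line:
 *
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 20) == 32
 *		(64 -> 32 because 20 <= 32; stop because 20 > 16)
 */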

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

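/*
 * These two break the bootstrap chicken-and-egg: the struct kmem_cache
 * describing a cache is itself slab-allocated. Simplified sketch of the
 * sequence in kmem_cache_init() (the static buffer and the kmalloc index
 * below are illustrative, not the exact code):
 *
 *	static struct kmem_cache boot_kmem_cache;
 *
 *	kmem_cache = &boot_kmem_cache;
 *	create_boot_cache(kmem_cache, "kmem_cache",
 *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 *	kmalloc_caches[7] = create_kmalloc_cache("kmalloc-128", 128, 0);
 */
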
struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
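
/*
 * SLUB implements __kmem_cache_alias() so that a new cache request can be
 * merged into an existing compatible cache instead of creating a
 * duplicate; the !CONFIG_SLUB stub returns NULL so creation always
 * proceeds. Simplified sketch of the caller's logic in
 * kmem_cache_create() (locking and error handling elided):
 *
 *	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 *	if (s)
 *		return s;
 *	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 *	...
 */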

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
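
/*
 * Caller-supplied flags are sanitized against this mask so that
 * allocator-internal flags cannot be passed in from outside. Sketch of
 * the intended use at cache creation time:
 *
 *	flags &= CACHE_CREATE_MASK;
 */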

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
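
/*
 * These hooks back /proc/slabinfo. Condensed sketch of how one row is
 * produced from struct slabinfo (simplified from mm/slab_common.c):
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
 *		   cache_name(s), sinfo.active_objs, sinfo.num_objs,
 *		   s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order));
 *	slabinfo_show_stats(m, s);
 */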

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
				(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}
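
/*
 * The two helpers above are meant to bracket slab page allocation for a
 * memcg cache, so that nr_pages tracks its footprint and a dead cache can
 * be destroyed once its last page is gone. Sketch of the expected pairing
 * in an allocator's page paths:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page)
 *		memcg_bind_pages(s, order);
 *	...
 *	memcg_release_pages(s, order);
 *	__free_pages(page, order);
 */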

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * In memcg, caches get a suffix appended to their name, since two caches
 * in the system cannot share a name. When printing a cache locally,
 * though, it is better to refer to it by its base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
	return s->memcg_params->memcg_caches[idx];
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}
#endif
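
/*
 * Putting the helpers together: a per-memcg allocation resolves its cache
 * copy through the root cache's memcg_caches array and falls back to the
 * root cache while the copy does not exist yet. Editor's sketch (index
 * lookup illustrative, locking elided):
 *
 *	int idx = memcg_cache_id(memcg);
 *	struct kmem_cache *c = cache_from_memcg(root_cache, idx);
 *
 *	if (!c)
 *		c = root_cache;
 */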

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments below should
	 * return the same value, but we don't want to pay the assignment
	 * price in that case. If kmemcg is not compiled in, the compiler
	 * should be smart enough to elide even the assignment, and
	 * slab_equal_or_root then also reduces to a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
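
/*
 * The free path is the intended caller: kmem_cache_free() remaps the
 * cache pointer so that an object allocated from a memcg copy is returned
 * to that copy even if the caller passes the root cache. Condensed
 * sketch:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		...
 *	}
 */
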
#endif /* MM_SLAB_H */