linux/include/linux/slab.h
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *      Cleaned up and restructured to ease the addition of alternative
 *      implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE         0x00000100UL    /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE           0x00000400UL    /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON             0x00000800UL    /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* Align objs on cache lines */
#define SLAB_CACHE_DMA          0x00004000UL    /* Use GFP_DMA memory */
#define SLAB_STORE_USER         0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC              0x00040000UL    /* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that after kmem_cache_free() the
 * memory location is free to be reused at any time. Thus it may be
 * possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures that the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU     0x00080000UL    /* Defer freeing slabs to RCU */
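/*
 * A minimal sketch of creating such a cache. The struct my_obj, its
 * fields and my_obj_ctor() are hypothetical; the point is that the
 * reference count and key used by the validation pass above must live
 * at stable offsets and be initialized by the constructor, since a
 * freed slot may be reused for a new object within the grace period:
 *
 *  struct my_obj {
 *          atomic_t refcnt;        // used by try_get_ref()/put_ref()
 *          unsigned long key;      // checked after taking the reference
 *  };
 *
 *  static void my_obj_ctor(void *p)
 *  {
 *          struct my_obj *obj = p;
 *
 *          atomic_set(&obj->refcnt, 0);    // 0 == free, lookups skip it
 *  }
 *
 *  cachep = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *                             SLAB_DESTROY_BY_RCU, my_obj_ctor);
 */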
#define SLAB_MEM_SPREAD         0x00100000UL    /* Spread some memory over cpuset */
#define SLAB_TRACE              0x00200000UL    /* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS     0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS     0x00000000UL
#endif

#define SLAB_NOLEAKTRACE        0x00800000UL    /* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK           0x01000000UL
#else
# define SLAB_NOTRACK           0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB          0x02000000UL    /* Fault injection mark */
#else
# define SLAB_FAILSLAB          0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT    0x00020000UL            /* Objects are reclaimable */
#define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero-sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                (unsigned long)ZERO_SIZE_PTR)
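/*
 * A short illustration (the buf and len names are hypothetical):
 * callers need not special-case a length of zero, because the returned
 * ZERO_SIZE_PTR is non-NULL yet safe to pass to kfree():
 *
 *  char *buf = kmalloc(len, GFP_KERNEL);   // len == 0 -> ZERO_SIZE_PTR
 *
 *  if (!buf)
 *          return -ENOMEM;                 // only real failures are NULL
 *  ...
 *  kfree(buf);                             // no-op for ZERO_SIZE_PTR
 */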

/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB), or the
 * allocator must include definitions for all fields provided in
 * kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the separate
 * allocations in the kmem_cache structure of SLAB and SLUB are no
 * longer needed.
 */
#ifdef CONFIG_SLOB
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size  */
        unsigned int align;     /* Alignment as calculated */
        unsigned long flags;    /* Active flags on the slab */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};
#endif

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                        unsigned long,
                        void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
                        unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

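/*
 * A minimal sketch of the cache lifecycle these prototypes imply. The
 * struct foo and error handling are hypothetical; kmem_cache_alloc()
 * itself is declared by the allocator-specific headers included below:
 *
 *  static struct kmem_cache *foo_cachep;
 *
 *  foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                 SLAB_HWCACHE_ALIGN, NULL);
 *  if (!foo_cachep)
 *          return -ENOMEM;
 *
 *  struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *  ...
 *  kmem_cache_free(foo_cachep, f);
 *  kmem_cache_destroy(foo_cachep);
 */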
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
                sizeof(struct __struct), __alignof__(struct __struct),\
                (__flags), NULL)
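/*
 * For example (struct foo again hypothetical), this expands to a
 * kmem_cache_create() call whose name, size and alignment are all
 * derived from the struct itself, so they cannot drift apart:
 *
 *  foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
 *      // == kmem_cache_create("foo", sizeof(struct foo),
 *      //                      __alignof__(struct foo), SLAB_PANIC, NULL)
 */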

/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH      ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                                (MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER       (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
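/*
 * A worked example, assuming the common x86-64 defaults of
 * PAGE_SHIFT == 12 (4 KB pages) and MAX_ORDER == 11:
 *
 *  KMALLOC_SHIFT_HIGH = min(11 + 12 - 1, 25) = 22
 *  KMALLOC_MAX_SIZE   = 1UL << 22            = 4 MB
 *  KMALLOC_MAX_ORDER  = 22 - 12              = 10 (i.e. 2^10 pages)
 *
 * so on such a configuration the page-order limit, not the 32 MB
 * ceiling, is what caps the largest kmalloc.
 */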

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled is bigger than it
 * would be if this were bundled in kmem_cache: we'll need an extra pointer
 * chase. But the trade-off clearly lies in favor of not penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global root cache this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
        bool is_root_cache;
        union {
                struct kmem_cache *memcg_caches[0];
                struct {
                        struct mem_cgroup *memcg;
                        struct list_head list;
                        struct kmem_cache *root_cache;
                        bool dead;
                        atomic_t nr_pages;
                        struct work_struct destroy;
                };
        };
};
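/*
 * A hedged sketch of how the union above is meant to be read. The
 * memcg_params field name on struct kmem_cache, idx, child and root
 * are all hypothetical here: is_root_cache discriminates between the
 * flexible array of per-memcg caches and the child-only fields:
 *
 *  struct memcg_cache_params *p = s->memcg_params;
 *
 *  if (p->is_root_cache)
 *          child = p->memcg_caches[idx];   // per-memcg copies, by index
 *  else
 *          root = p->root_cache;           // child points back at its root
 */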

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
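/*
 * A small usage sketch (buf, tmp and new_len are hypothetical).
 * krealloc() returns NULL on failure and leaves the original buffer
 * intact, so the classic tmp-pointer idiom applies; ksize() reports
 * the usable size actually reserved, which may exceed what was
 * requested:
 *
 *  char *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 *  if (!tmp)
 *          return -ENOMEM;     // buf is untouched and still owned by us
 *  buf = tmp;
 *
 *  pr_debug("usable bytes: %zu\n", ksize(buf));
 */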

/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *      kmem_cache_alloc()
 *      __kmalloc()
 *      kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *      kmem_cache_alloc_node()
 *      kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;
        return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
        return kmalloc_array(n, size, flags | __GFP_ZERO);
}
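/*
 * A brief usage sketch (nr and struct item are hypothetical). The point
 * of kmalloc_array()/kcalloc() over a bare kmalloc(n * size, ...) is
 * the overflow check above: an attacker-influenced nr cannot wrap the
 * multiplication into a too-small allocation:
 *
 *  struct item *items = kcalloc(nr, sizeof(*items), GFP_KERNEL);
 *
 *  if (!items)
 *          return -ENOMEM;     // NULL on failure _or_ on n*size overflow
 */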

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
        return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
                                        gfp_t flags, int node)
{
        return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
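/*
 * A sketch of the intended use (per_node_data and struct nd are
 * hypothetical): allocating one object near each node's memory so later
 * accesses from CPUs on that node stay local. On !NUMA builds the stubs
 * above make this collapse to plain kmalloc():
 *
 *  int node;
 *
 *  for_each_online_node(node) {
 *          per_node_data[node] = kmalloc_node(sizeof(struct nd),
 *                                             GFP_KERNEL, node);
 *          if (!per_node_data[node])
 *                  goto fail;
 *  }
 */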

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
        (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
        __kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
        __kmalloc(size, flags)
#endif /* DEBUG_SLAB */
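/*
 * A hedged example of the kind of wrapper this is meant for (the
 * my_dup() helper is hypothetical): because kmalloc_track_caller()
 * records _RET_IP_, leak reports blame the wrapper's caller rather
 * than the wrapper itself:
 *
 *  static void *my_dup(const void *src, size_t len, gfp_t gfp)
 *  {
 *          void *p = kmalloc_track_caller(len, gfp);
 *
 *          if (p)
 *                  memcpy(p, src, len);
 *          return p;
 *  }
 */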

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
        (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
        (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
        __kmalloc_node_track_caller(size, flags, node, \
                        _RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
        __kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
        kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
        return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
        return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
        return kmalloc_node(size, flags | __GFP_ZERO, node);
}
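/*
 * A short illustration (struct ctx hypothetical): preferring these
 * zeroing shortcuts over kmalloc() followed by memset() reads better
 * and lets the allocator hand back already-zeroed memory:
 *
 *  struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);
 *
 *  if (!c)
 *          return -ENOMEM;
 *  // every field of *c starts out zero; no memset() needed
 */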

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
        return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif  /* _LINUX_SLAB_H */