linux/include/linux/memcontrol.h
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits of that field could therefore be passed, but having a rule keeps
 * callers unambiguous: a charge function's gfp_mask should be set to
 * GFP_KERNEL or to gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * the sane choice.)
 */
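/*
 * Example (editor's sketch, not part of the original header): a page-cache
 * caller typically masks its own gfp flags before charging, along the
 * lines of:
 *
 *      error = mem_cgroup_cache_charge(page, current->mm,
 *                                      gfp_mask & GFP_RECLAIM_MASK);
 *      if (error)
 *              return error;
 */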

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
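/*
 * Usage sketch (editor's illustration of a hypothetical swap-in path):
 * the charge is taken in two phases so it can be dropped if the fault
 * does not complete:
 *
 *      struct mem_cgroup *memcg = NULL;
 *
 *      if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *              goto out_fail;
 *      ...                             (map the page into the page tables)
 *      if (error)
 *              mem_cgroup_cancel_charge_swapin(memcg);
 *      else
 *              mem_cgroup_commit_charge_swapin(page, memcg);
 */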

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For batching ("coalescing") uncharges, to reduce memcg overhead. */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
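/*
 * Usage sketch (editor's illustration; pages_to_free is a hypothetical
 * list): callers releasing many pages at once bracket the loop so the
 * uncharges are batched:
 *
 *      mem_cgroup_uncharge_start();
 *      list_for_each_entry(page, &pages_to_free, lru)
 *              mem_cgroup_uncharge_page(page);
 *      mem_cgroup_uncharge_end();
 */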

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
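/*
 * Usage sketch (editor's illustration): mem_cgroup_iter() walks the
 * hierarchy below @root; a walk that is abandoned early must be finished
 * with mem_cgroup_iter_break():
 *
 *      struct mem_cgroup *iter;
 *
 *      for (iter = mem_cgroup_iter(root, NULL, NULL);
 *           iter;
 *           iter = mem_cgroup_iter(root, iter, NULL)) {
 *              if (enough_reclaimed) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 *
 * (enough_reclaimed is a stand-in condition, not a real symbol.)
 */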

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (mem_cgroup_subsys.disabled)
                return true;
        return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}
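/*
 * Usage sketch (editor's illustration, modelled on the file-rmap
 * accounting pattern): page statistics updates are bracketed so they
 * cannot race with charge moving:
 *
 *      bool locked;
 *      unsigned long flags;
 *
 *      mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *      if (atomic_inc_and_test(&page->_mapcount))
 *              mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *      mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */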

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask,
                                                unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                          struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        return true;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
                                     const struct mem_cgroup *memcg)
{
        return 1;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                              int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check whether the cache is valid (it is either valid or NULL).
 * slab_mutex must be held while looping through these caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
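/*
 * Usage sketch (editor's illustration; cache_from_memcg() stands in for
 * whatever per-index accessor the slab allocator provides):
 *
 *      int i;
 *      struct kmem_cache *c;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache_index(i) {
 *              c = cache_from_memcg(s, i);
 *              if (!c)
 *                      continue;
 *              ...
 *      }
 *      mutex_unlock(&slab_mutex);
 */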

static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users of the kmem functions: not even a function call,
 * if we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly.  If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                        int order);
void __memcg_kmem_commit_charge(struct page *page,
                                       struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
                         struct kmem_cache *root_cache);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and leave this allocation
         * unaccounted. We could in theory charge it with
         * res_counter_charge_nofail, but we hope those allocations are rare,
         * and it won't be worth the trouble.
         */
        if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @memcg to the corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}
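/*
 * Usage sketch (editor's illustration, modelled on the page allocator's
 * own accounting hooks): a __GFP_KMEMCG allocation reserves the charge
 * first and commits it against the page that was actually allocated:
 *
 *      struct mem_cgroup *memcg = NULL;
 *      struct page *page;
 *
 *      if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *              return NULL;
 *      page = ...;                     (allocate 2^order pages)
 *      memcg_kmem_commit_charge(page, memcg, order);
 *
 * and when the pages are finally freed:
 *
 *      memcg_kmem_uncharge_pages(page, order);
 */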

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process.  Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case __GFP_KMEMCG will not be
 * passed and the page allocator will not attempt any cgroup accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        if (!memcg_kmem_enabled())
                return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return cachep;
        if (unlikely(fatal_signal_pending(current)))
                return cachep;

        return __memcg_kmem_get_cache(cachep, gfp);
}
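/*
 * Usage sketch (editor's illustration; the allocator-internal helper name
 * is a stand-in): slab allocation paths substitute the per-memcg cache
 * for the global one before carving out an object:
 *
 *      cachep = memcg_kmem_get_cache(cachep, flags);
 *      return slab_alloc(cachep, flags, caller);
 */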
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
                     struct kmem_cache *root_cache)
{
        return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
                                        struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */