linux/include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}
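
/*
 * Illustrative usage sketch (not part of the original header): event
 * sites simply bump a counter.  __count_vm_event() assumes preemption
 * is already disabled; count_vm_event() takes care of that itself.
 * The function below is hypothetical.
 *
 *	static void hypothetical_fault_bookkeeping(unsigned long nr_kb)
 *	{
 *		count_vm_event(PGFAULT);
 *		count_vm_events(PGPGIN, nr_kb);
 *	}
 */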

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do {} while (0)
#endif /* CONFIG_NUMA_BALANCING */

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
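
/*
 * Illustrative expansion sketch (not part of the original header): the
 * macro relies on the per-zone event items being enumerated contiguously
 * in zone order, so item##_NORMAL can be rebased by zone index.  For a
 * zone whose zone_idx() is ZONE_HIGHMEM, for example,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * ends up incrementing the PGALLOC_HIGHMEM counter by 1 << order.
 */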

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

/*
 * More accurate version that also considers the currently pending
 * per-CPU deltas, which requires looping over all CPUs.  There is no
 * synchronization, so the result is still only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
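
/*
 * Illustrative sketch (not part of the original header): a reader that
 * cares about a counter hovering near zero can pay for the per-CPU walk
 * and use the snapshot variant.  The function and threshold below are
 * hypothetical.
 *
 *	static bool hypothetical_zone_nearly_empty(struct zone *z)
 *	{
 *		return zone_page_state_snapshot(z, NR_FREE_PAGES) < 32;
 *	}
 */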

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}
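
/*
 * Illustrative usage sketch (not part of the original header): summing
 * the configured zones of one node yields the node-wide figure, e.g.
 *
 *	unsigned long nr = node_page_state(numa_node_id(), NR_FILE_PAGES);
 *
 * On !CONFIG_NUMA builds the macro below maps this to global_page_state().
 */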

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We use only atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif          /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
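
/*
 * Illustrative usage sketch (not part of the original header): the page
 * count is signed, so one helper covers both directions, e.g. when a
 * buddy block is returned to or taken from the free lists:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * CMA pages are additionally reflected in NR_FREE_CMA_PAGES.
 */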

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */