linux/mm/percpu-stats.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-stats.c
 *
 * Copyright (C) 2017           Facebook Inc.
 * Copyright (C) 2017           Dennis Zhou <dennis@kernel.org>
 *
 * Prints statistics about the percpu allocator and backing chunks.
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/sort.h>
#include <linux/vmalloc.h>

#include "percpu-internal.h"

#define P(X, Y) \
        seq_printf(m, "  %-20s: %12lld\n", X, (long long int)Y)

struct percpu_stats pcpu_stats;
struct pcpu_alloc_info pcpu_stats_ai;

static int cmpint(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

/*
 * Iterates over all chunks to find the max nr_alloc entries.
 */
static int find_max_nr_alloc(void)
{
        struct pcpu_chunk *chunk;
        int slot, max_nr_alloc;
        enum pcpu_chunk_type type;

        max_nr_alloc = 0;
        for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
                for (slot = 0; slot < pcpu_nr_slots; slot++)
                        list_for_each_entry(chunk, &pcpu_chunk_list(type)[slot],
                                            list)
                                max_nr_alloc = max(max_nr_alloc,
                                                   chunk->nr_alloc);

        return max_nr_alloc;
}

/*
 * Prints out chunk state.  Fragmentation is considered from the
 * beginning of the chunk to the last allocation.
 *
 * All statistics are in bytes unless stated otherwise.
 */
static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
                            int *buffer)
{
        struct pcpu_block_md *chunk_md = &chunk->chunk_md;
        int i, last_alloc, as_len, start, end;
        int *alloc_sizes, *p;
        /* statistics */
        int sum_frag = 0, max_frag = 0;
        int cur_min_alloc = 0, cur_med_alloc = 0, cur_max_alloc = 0;

        alloc_sizes = buffer;

        /*
         * find_last_bit() returns the passed-in size when no set bit is
         * found.  Therefore, check whether the returned position really is
         * an allocated bit and, if it is not, treat the range as having no
         * allocations.
         */
        last_alloc = find_last_bit(chunk->alloc_map,
                                   pcpu_chunk_map_bits(chunk) -
                                   chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1);
        last_alloc = test_bit(last_alloc, chunk->alloc_map) ?
                     last_alloc + 1 : 0;

        as_len = 0;
        start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;

        /*
         * If a bit is set in the allocation map, the bound_map identifies
         * where the allocation ends.  If the bit is not set, the bound_map
         * does not identify free areas as it is only kept accurate on
         * allocation, not on free.
         *
         * Positive values are allocations and negative values are free
         * fragments.
         */
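        /*
         * Illustrative example (hypothetical bitmaps; assumes
         * PCPU_MIN_ALLOC_SIZE == 4 and start_offset == 0): with three
         * allocations covering bits 0-2, 5 and 9-11,
         *
         *   alloc_map: 1 1 1 0 0 1 0 0 0 1 1 1
         *   bound_map: 1 0 0 1 0 1 1 0 0 1 0 0 1
         *
         * the walk below yields alloc_sizes[] = { 12, -8, 4, -12, 12 }:
         * allocations of 12, 4 and 12 bytes separated by free fragments
         * of 8 and 12 bytes.  After the sort further down this becomes
         * { -12, -8, 4, 12, 12 }, giving sum_frag = 20, max_frag = 12,
         * cur_min_alloc = 4, cur_med_alloc = 12 and cur_max_alloc = 12.
         */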
        while (start < last_alloc) {
                if (test_bit(start, chunk->alloc_map)) {
                        end = find_next_bit(chunk->bound_map, last_alloc,
                                            start + 1);
                        alloc_sizes[as_len] = 1;
                } else {
                        end = find_next_bit(chunk->alloc_map, last_alloc,
                                            start + 1);
                        alloc_sizes[as_len] = -1;
                }

                alloc_sizes[as_len++] *= (end - start) * PCPU_MIN_ALLOC_SIZE;

                start = end;
        }

        /*
         * The negative values are free fragments, so sorting in ascending
         * order places the free fragments at the beginning, largest first.
         */
        if (as_len > 0) {
                sort(alloc_sizes, as_len, sizeof(int), cmpint, NULL);

                /* iterate through the unallocated fragments */
                for (i = 0, p = alloc_sizes; *p < 0 && i < as_len; i++, p++) {
                        sum_frag -= *p;
                        max_frag = max(max_frag, -1 * (*p));
                }

                cur_min_alloc = alloc_sizes[i];
                cur_med_alloc = alloc_sizes[(i + as_len - 1) / 2];
                cur_max_alloc = alloc_sizes[as_len - 1];
        }

        P("nr_alloc", chunk->nr_alloc);
        P("max_alloc_size", chunk->max_alloc_size);
        P("empty_pop_pages", chunk->nr_empty_pop_pages);
        P("first_bit", chunk_md->first_free);
        P("free_bytes", chunk->free_bytes);
        P("contig_bytes", chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
        P("sum_frag", sum_frag);
        P("max_frag", max_frag);
        P("cur_min_alloc", cur_min_alloc);
        P("cur_med_alloc", cur_med_alloc);
        P("cur_max_alloc", cur_max_alloc);
#ifdef CONFIG_MEMCG_KMEM
        P("memcg_aware", pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)));
#endif
        seq_putc(m, '\n');
}
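
/*
 * Each call to chunk_map_stats() emits one block of the debugfs output,
 * formatted by P() as "  %-20s: %12lld".  For the hypothetical bitmaps
 * sketched above, the fragmentation fields of such a block would read
 * (values illustrative only):
 *
 *   sum_frag            :           20
 *   max_frag            :           12
 *   cur_min_alloc       :            4
 *   cur_med_alloc       :           12
 *   cur_max_alloc       :           12
 */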

static int percpu_stats_show(struct seq_file *m, void *v)
{
        struct pcpu_chunk *chunk;
        int slot, max_nr_alloc;
        int *buffer;
        enum pcpu_chunk_type type;
        int nr_empty_pop_pages;

alloc_buffer:
        spin_lock_irq(&pcpu_lock);
        max_nr_alloc = find_max_nr_alloc();
        spin_unlock_irq(&pcpu_lock);

        /*
         * There can be at most this many free and allocated fragments:
         * each allocation can be preceded by at most one free fragment,
         * bounding the total by 2 * max_nr_alloc, plus one entry of slack.
         */
        buffer = vmalloc(array_size(sizeof(int), (2 * max_nr_alloc + 1)));
        if (!buffer)
                return -ENOMEM;

        spin_lock_irq(&pcpu_lock);

        /*
         * If the buffer allocated earlier is too small (allocations may
         * have happened while pcpu_lock was dropped for the vmalloc above),
         * drop the lock and retry with a bigger buffer.
         */
        if (max_nr_alloc < find_max_nr_alloc()) {
                spin_unlock_irq(&pcpu_lock);
                vfree(buffer);
                goto alloc_buffer;
        }

        nr_empty_pop_pages = 0;
        for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
                nr_empty_pop_pages += pcpu_nr_empty_pop_pages[type];

#define PL(X)                                                           \
        seq_printf(m, "  %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)

        seq_printf(m,
                        "Percpu Memory Statistics\n"
                        "Allocation Info:\n"
                        "----------------------------------------\n");
        PL(unit_size);
        PL(static_size);
        PL(reserved_size);
        PL(dyn_size);
        PL(atom_size);
        PL(alloc_size);
        seq_putc(m, '\n');

#undef PL

#define PU(X) \
        seq_printf(m, "  %-20s: %12llu\n", #X, (unsigned long long)pcpu_stats.X)

        seq_printf(m,
                        "Global Stats:\n"
                        "----------------------------------------\n");
        PU(nr_alloc);
        PU(nr_dealloc);
        PU(nr_cur_alloc);
        PU(nr_max_alloc);
        PU(nr_chunks);
        PU(nr_max_chunks);
        PU(min_alloc_size);
        PU(max_alloc_size);
        P("empty_pop_pages", nr_empty_pop_pages);
        seq_putc(m, '\n');

#undef PU

        seq_printf(m,
                        "Per Chunk Stats:\n"
                        "----------------------------------------\n");

        if (pcpu_reserved_chunk) {
                seq_puts(m, "Chunk: <- Reserved Chunk\n");
                chunk_map_stats(m, pcpu_reserved_chunk, buffer);
        }

        for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) {
                for (slot = 0; slot < pcpu_nr_slots; slot++) {
                        list_for_each_entry(chunk, &pcpu_chunk_list(type)[slot],
                                            list) {
                                if (chunk == pcpu_first_chunk) {
                                        seq_puts(m, "Chunk: <- First Chunk\n");
                                        chunk_map_stats(m, chunk, buffer);
                                } else {
                                        seq_puts(m, "Chunk:\n");
                                        chunk_map_stats(m, chunk, buffer);
                                }
                        }
                }
        }

        spin_unlock_irq(&pcpu_lock);

        vfree(buffer);

        return 0;
}
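/*
 * DEFINE_SHOW_ATTRIBUTE(percpu_stats) generates a single_open()-based
 * percpu_stats_open() and the percpu_stats_fops file_operations used
 * below, wiring the debugfs read path to percpu_stats_show().
 */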
DEFINE_SHOW_ATTRIBUTE(percpu_stats);

static int __init init_percpu_stats_debugfs(void)
{
        debugfs_create_file("percpu_stats", 0444, NULL, NULL,
                        &percpu_stats_fops);

        return 0;
}

late_initcall(init_percpu_stats_debugfs);
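
/*
 * Usage sketch (assuming debugfs is mounted at its conventional location):
 *
 *   # mount -t debugfs none /sys/kernel/debug     (if not already mounted)
 *   # cat /sys/kernel/debug/percpu_stats
 *
 * The NULL parent passed to debugfs_create_file() above places the file
 * directly in the debugfs root rather than in a subdirectory.
 */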