linux/kernel/trace/ring_buffer.c
   1/*
   2 * Generic ring buffer
   3 *
   4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   5 */
   6#include <linux/ftrace_event.h>
   7#include <linux/ring_buffer.h>
   8#include <linux/trace_clock.h>
   9#include <linux/trace_seq.h>
  10#include <linux/spinlock.h>
  11#include <linux/debugfs.h>
  12#include <linux/uaccess.h>
  13#include <linux/hardirq.h>
  14#include <linux/kmemcheck.h>
  15#include <linux/module.h>
  16#include <linux/percpu.h>
  17#include <linux/mutex.h>
  18#include <linux/slab.h>
  19#include <linux/init.h>
  20#include <linux/hash.h>
  21#include <linux/list.h>
  22#include <linux/cpu.h>
  23#include <linux/fs.h>
  24
  25#include <asm/local.h>
  26
  27static void update_pages_handler(struct work_struct *work);
  28
  29/*
   30 * The ring buffer header is special. We must manually keep it up to date.
  31 */
  32int ring_buffer_print_entry_header(struct trace_seq *s)
  33{
  34        int ret;
  35
  36        ret = trace_seq_printf(s, "# compressed entry header\n");
  37        ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
  38        ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
  39        ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
  40        ret = trace_seq_printf(s, "\n");
  41        ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
  42                               RINGBUF_TYPE_PADDING);
  43        ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
  44                               RINGBUF_TYPE_TIME_EXTEND);
  45        ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
  46                               RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  47
  48        return ret;
  49}
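
/*
 * With the usual type values from <linux/ring_buffer.h> (data events use
 * type_len 0..28, padding 29, time extend 30), the header printed above
 * comes out roughly like this (shown here only as a reference sketch):
 *
 *	# compressed entry header
 *		type_len    :    5 bits
 *		time_delta  :   27 bits
 *		array       :   32 bits
 *
 *		padding     : type == 29
 *		time_extend : type == 30
 *		data max type_len  == 28
 */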
  50
  51/*
  52 * The ring buffer is made up of a list of pages. A separate list of pages is
  53 * allocated for each CPU. A writer may only write to a buffer that is
  54 * associated with the CPU it is currently executing on.  A reader may read
  55 * from any per cpu buffer.
  56 *
  57 * The reader is special. For each per cpu buffer, the reader has its own
  58 * reader page. When a reader has read the entire reader page, this reader
  59 * page is swapped with another page in the ring buffer.
  60 *
   61 * Now, as long as the writer is off the reader page, the reader can do
   62 * whatever it wants with that page. The writer will never write to that page
  63 * again (as long as it is out of the ring buffer).
  64 *
  65 * Here's some silly ASCII art.
  66 *
  67 *   +------+
  68 *   |reader|          RING BUFFER
  69 *   |page  |
  70 *   +------+        +---+   +---+   +---+
  71 *                   |   |-->|   |-->|   |
  72 *                   +---+   +---+   +---+
  73 *                     ^               |
  74 *                     |               |
  75 *                     +---------------+
  76 *
  77 *
  78 *   +------+
  79 *   |reader|          RING BUFFER
  80 *   |page  |------------------v
  81 *   +------+        +---+   +---+   +---+
  82 *                   |   |-->|   |-->|   |
  83 *                   +---+   +---+   +---+
  84 *                     ^               |
  85 *                     |               |
  86 *                     +---------------+
  87 *
  88 *
  89 *   +------+
  90 *   |reader|          RING BUFFER
  91 *   |page  |------------------v
  92 *   +------+        +---+   +---+   +---+
  93 *      ^            |   |-->|   |-->|   |
  94 *      |            +---+   +---+   +---+
  95 *      |                              |
  96 *      |                              |
  97 *      +------------------------------+
  98 *
  99 *
 100 *   +------+
 101 *   |buffer|          RING BUFFER
 102 *   |page  |------------------v
 103 *   +------+        +---+   +---+   +---+
 104 *      ^            |   |   |   |-->|   |
 105 *      |   New      +---+   +---+   +---+
 106 *      |  Reader------^               |
 107 *      |   page                       |
 108 *      +------------------------------+
 109 *
 110 *
 111 * After we make this swap, the reader can hand this page off to the splice
 112 * code and be done with it. It can even allocate a new page if it needs to
 113 * and swap that into the ring buffer.
 114 *
 115 * We will be using cmpxchg soon to make all this lockless.
 116 *
 117 */
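
/*
 * A condensed sketch of the swap shown in the ASCII art above.  The real
 * work is done later in this file by rb_get_reader_page(), which also has
 * to deal with the HEAD flag bits and retry with cmpxchg; the helper name
 * below is purely illustrative.
 */
#if 0
static void rb_swap_reader_page_sketch(struct ring_buffer_per_cpu *cpu_buffer,
                                       struct buffer_page *head)
{
        struct buffer_page *reader = cpu_buffer->reader_page;

        /* Splice the old reader page in where the head page used to be */
        reader->list.next = head->list.next;
        reader->list.prev = head->list.prev;
        reader->list.prev->next = &reader->list;
        reader->list.next->prev = &reader->list;

        /* The page we pulled out is now exclusively owned by the reader */
        cpu_buffer->reader_page = head;
}
#endif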
 118
 119/*
 120 * A fast way to enable or disable all ring buffers is to
 121 * call tracing_on or tracing_off. Turning off the ring buffers
 122 * prevents all ring buffers from being recorded to.
  123 * Turning this switch on makes it OK to write to the
 124 * ring buffer, if the ring buffer is enabled itself.
 125 *
  126 * There are three layers that must be on in order to write
 127 * to the ring buffer.
 128 *
 129 * 1) This global flag must be set.
 130 * 2) The ring buffer must be enabled for recording.
 131 * 3) The per cpu buffer must be enabled for recording.
 132 *
 133 * In case of an anomaly, this global flag has a bit set that
  134 * will permanently disable all ring buffers.
 135 */
 136
 137/*
 138 * Global flag to disable all recording to ring buffers
 139 *  This has two bits: ON, DISABLED
 140 *
 141 *  ON   DISABLED
 142 * ---- ----------
 143 *   0      0        : ring buffers are off
 144 *   1      0        : ring buffers are on
 145 *   X      1        : ring buffers are permanently disabled
 146 */
 147
 148enum {
 149        RB_BUFFERS_ON_BIT       = 0,
 150        RB_BUFFERS_DISABLED_BIT = 1,
 151};
 152
 153enum {
 154        RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
 155        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
 156};
 157
 158static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
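
/*
 * Roughly how the three layers described above gate a write.  This is a
 * condensed sketch of the checks made by ring_buffer_lock_reserve()
 * later in this file, not a verbatim copy:
 */
#if 0
        if (ring_buffer_flags != RB_BUFFERS_ON)         /* 1) global switch */
                return NULL;
        if (atomic_read(&buffer->record_disabled))      /* 2) this ring buffer */
                return NULL;
        if (atomic_read(&cpu_buffer->record_disabled))  /* 3) this per cpu buffer */
                return NULL;
#endif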
 159
 160/* Used for individual buffers (after the counter) */
 161#define RB_BUFFER_OFF           (1 << 20)
 162
 163#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 164
 165/**
 166 * tracing_off_permanent - permanently disable ring buffers
 167 *
 168 * This function, once called, will disable all ring buffers
 169 * permanently.
 170 */
 171void tracing_off_permanent(void)
 172{
 173        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 174}
 175
 176#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 177#define RB_ALIGNMENT            4U
 178#define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 179#define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
 180
 181#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 182# define RB_FORCE_8BYTE_ALIGNMENT       0
 183# define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
 184#else
 185# define RB_FORCE_8BYTE_ALIGNMENT       1
 186# define RB_ARCH_ALIGNMENT              8U
 187#endif
 188
 189#define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)
 190
 191/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 192#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 193
 194enum {
 195        RB_LEN_TIME_EXTEND = 8,
 196        RB_LEN_TIME_STAMP = 16,
 197};
 198
 199#define skip_time_extend(event) \
 200        ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 201
 202static inline int rb_null_event(struct ring_buffer_event *event)
 203{
 204        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 205}
 206
 207static void rb_event_set_padding(struct ring_buffer_event *event)
 208{
  209        /* padding has a zero time_delta */
 210        event->type_len = RINGBUF_TYPE_PADDING;
 211        event->time_delta = 0;
 212}
 213
 214static unsigned
 215rb_event_data_length(struct ring_buffer_event *event)
 216{
 217        unsigned length;
 218
 219        if (event->type_len)
 220                length = event->type_len * RB_ALIGNMENT;
 221        else
 222                length = event->array[0];
 223        return length + RB_EVNT_HDR_SIZE;
 224}
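
/*
 * The encoding that rb_event_data_length() undoes is set up at reserve
 * time.  A simplified sketch of that encoding (see rb_update_event()
 * later in this file for the real thing):
 */
#if 0
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
                event->type_len = 0;
                event->array[0] = length;       /* length lives in array[0] */
        } else
                event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
#endif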
 225
 226/*
 227 * Return the length of the given event. Will return
 228 * the length of the time extend if the event is a
 229 * time extend.
 230 */
 231static inline unsigned
 232rb_event_length(struct ring_buffer_event *event)
 233{
 234        switch (event->type_len) {
 235        case RINGBUF_TYPE_PADDING:
 236                if (rb_null_event(event))
 237                        /* undefined */
 238                        return -1;
 239                return  event->array[0] + RB_EVNT_HDR_SIZE;
 240
 241        case RINGBUF_TYPE_TIME_EXTEND:
 242                return RB_LEN_TIME_EXTEND;
 243
 244        case RINGBUF_TYPE_TIME_STAMP:
 245                return RB_LEN_TIME_STAMP;
 246
 247        case RINGBUF_TYPE_DATA:
 248                return rb_event_data_length(event);
 249        default:
 250                BUG();
 251        }
 252        /* not hit */
 253        return 0;
 254}
 255
 256/*
 257 * Return total length of time extend and data,
 258 *   or just the event length for all other events.
 259 */
 260static inline unsigned
 261rb_event_ts_length(struct ring_buffer_event *event)
 262{
 263        unsigned len = 0;
 264
 265        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
 266                /* time extends include the data event after it */
 267                len = RB_LEN_TIME_EXTEND;
 268                event = skip_time_extend(event);
 269        }
 270        return len + rb_event_length(event);
 271}
 272
 273/**
 274 * ring_buffer_event_length - return the length of the event
 275 * @event: the event to get the length of
 276 *
 277 * Returns the size of the data load of a data event.
 278 * If the event is something other than a data event, it
  279 * returns the size of the event itself. The exception is a
  280 * TIME_EXTEND event, where it still returns the size of the
  281 * data load of the data event that follows it.
 282 */
 283unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 284{
 285        unsigned length;
 286
 287        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 288                event = skip_time_extend(event);
 289
 290        length = rb_event_length(event);
 291        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 292                return length;
 293        length -= RB_EVNT_HDR_SIZE;
 294        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 295                length -= sizeof(event->array[0]);
 296        return length;
 297}
 298EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 299
 300/* inline for ring buffer fast paths */
 301static void *
 302rb_event_data(struct ring_buffer_event *event)
 303{
 304        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 305                event = skip_time_extend(event);
 306        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 307        /* If length is in len field, then array[0] has the data */
 308        if (event->type_len)
 309                return (void *)&event->array[0];
 310        /* Otherwise length is in array[0] and array[1] has the data */
 311        return (void *)&event->array[1];
 312}
 313
 314/**
 315 * ring_buffer_event_data - return the data of the event
 316 * @event: the event to get the data from
 317 */
 318void *ring_buffer_event_data(struct ring_buffer_event *event)
 319{
 320        return rb_event_data(event);
 321}
 322EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 323
 324#define for_each_buffer_cpu(buffer, cpu)                \
 325        for_each_cpu(cpu, buffer->cpumask)
 326
 327#define TS_SHIFT        27
 328#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
 329#define TS_DELTA_TEST   (~TS_MASK)
 330
 331/* Flag when events were overwritten */
 332#define RB_MISSED_EVENTS        (1 << 31)
 333/* Missed count stored at end */
 334#define RB_MISSED_STORED        (1 << 30)
 335
 336struct buffer_data_page {
 337        u64              time_stamp;    /* page time stamp */
 338        local_t          commit;        /* write committed index */
 339        unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
 340};
 341
 342/*
 343 * Note, the buffer_page list must be first. The buffer pages
 344 * are allocated in cache lines, which means that each buffer
 345 * page will be at the beginning of a cache line, and thus
 346 * the least significant bits will be zero. We use this to
 347 * add flags in the list struct pointers, to make the ring buffer
 348 * lockless.
 349 */
 350struct buffer_page {
 351        struct list_head list;          /* list of buffer pages */
 352        local_t          write;         /* index for next write */
 353        unsigned         read;          /* index for next read */
 354        local_t          entries;       /* entries on this page */
 355        unsigned long    real_end;      /* real end of data */
 356        struct buffer_data_page *page;  /* Actual data page */
 357};
 358
 359/*
  360 * The buffer page counters, write and entries, must be reset
  361 * atomically when crossing page boundaries. To synchronize this
  362 * update, two counters are packed into a single value. One is
  363 * the actual counter for the write position or entry count on the page.
  364 *
  365 * The other is a count of updaters. Before an update happens
  366 * the updater part of the counter is incremented. This
  367 * allows the updater to update the counter atomically.
  368 *
  369 * The counter occupies the low 20 bits, and the updater state the upper 12.
 370 */
 371#define RB_WRITE_MASK           0xfffff
 372#define RB_WRITE_INTCNT         (1 << 20)
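
/*
 * A sketch of how the split counter is used.  An updater announces itself
 * by bumping the upper bits, while the write index itself is always read
 * back with the mask applied (see rb_tail_page_update() and
 * rb_page_write() below):
 */
#if 0
        /* announce an updater: atomically bump the updater part */
        old_write = local_add_return(RB_WRITE_INTCNT, &bpage->write);

        /* the write position is only the low 20 bits */
        index = local_read(&bpage->write) & RB_WRITE_MASK;
#endif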
 373
 374static void rb_init_page(struct buffer_data_page *bpage)
 375{
 376        local_set(&bpage->commit, 0);
 377}
 378
 379/**
 380 * ring_buffer_page_len - the size of data on the page.
 381 * @page: The page to read
 382 *
 383 * Returns the amount of data on the page, including buffer page header.
 384 */
 385size_t ring_buffer_page_len(void *page)
 386{
 387        return local_read(&((struct buffer_data_page *)page)->commit)
 388                + BUF_PAGE_HDR_SIZE;
 389}
 390
 391/*
 392 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 393 * this issue out.
 394 */
 395static void free_buffer_page(struct buffer_page *bpage)
 396{
 397        free_page((unsigned long)bpage->page);
 398        kfree(bpage);
 399}
 400
 401/*
 402 * We need to fit the time_stamp delta into 27 bits.
 403 */
 404static inline int test_time_stamp(u64 delta)
 405{
 406        if (delta & TS_DELTA_TEST)
 407                return 1;
 408        return 0;
 409}
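
/*
 * When test_time_stamp() fires, the delta cannot be stored in the 27 bit
 * time_delta field and a time extend event is emitted instead.  A
 * simplified sketch of that encoding (see rb_add_time_stamp() later in
 * this file):
 */
#if 0
        event->type_len = RINGBUF_TYPE_TIME_EXTEND;
        event->time_delta = delta & TS_MASK;    /* low 27 bits */
        event->array[0] = delta >> TS_SHIFT;    /* remaining high bits */
#endif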
 410
 411#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 412
  413/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
 414#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 415
 416int ring_buffer_print_page_header(struct trace_seq *s)
 417{
 418        struct buffer_data_page field;
 419        int ret;
 420
 421        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 422                               "offset:0;\tsize:%u;\tsigned:%u;\n",
 423                               (unsigned int)sizeof(field.time_stamp),
 424                               (unsigned int)is_signed_type(u64));
 425
 426        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
 427                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
 428                               (unsigned int)offsetof(typeof(field), commit),
 429                               (unsigned int)sizeof(field.commit),
 430                               (unsigned int)is_signed_type(long));
 431
 432        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
 433                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
 434                               (unsigned int)offsetof(typeof(field), commit),
 435                               1,
 436                               (unsigned int)is_signed_type(long));
 437
 438        ret = trace_seq_printf(s, "\tfield: char data;\t"
 439                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
 440                               (unsigned int)offsetof(typeof(field), data),
 441                               (unsigned int)BUF_PAGE_SIZE,
 442                               (unsigned int)is_signed_type(char));
 443
 444        return ret;
 445}
 446
 447/*
  448 * If head_page == tail_page && head == tail, then the buffer is empty.
 449 */
 450struct ring_buffer_per_cpu {
 451        int                             cpu;
 452        atomic_t                        record_disabled;
 453        struct ring_buffer              *buffer;
 454        raw_spinlock_t                  reader_lock;    /* serialize readers */
 455        arch_spinlock_t                 lock;
 456        struct lock_class_key           lock_key;
 457        unsigned int                    nr_pages;
 458        struct list_head                *pages;
 459        struct buffer_page              *head_page;     /* read from head */
 460        struct buffer_page              *tail_page;     /* write to tail */
 461        struct buffer_page              *commit_page;   /* committed pages */
 462        struct buffer_page              *reader_page;
 463        unsigned long                   lost_events;
 464        unsigned long                   last_overrun;
 465        local_t                         entries_bytes;
 466        local_t                         entries;
 467        local_t                         overrun;
 468        local_t                         commit_overrun;
 469        local_t                         dropped_events;
 470        local_t                         committing;
 471        local_t                         commits;
 472        unsigned long                   read;
 473        unsigned long                   read_bytes;
 474        u64                             write_stamp;
 475        u64                             read_stamp;
 476        /* ring buffer pages to update, > 0 to add, < 0 to remove */
 477        int                             nr_pages_to_update;
 478        struct list_head                new_pages; /* new pages to add */
 479        struct work_struct              update_pages_work;
 480        struct completion               update_done;
 481};
 482
 483struct ring_buffer {
 484        unsigned                        flags;
 485        int                             cpus;
 486        atomic_t                        record_disabled;
 487        atomic_t                        resize_disabled;
 488        cpumask_var_t                   cpumask;
 489
 490        struct lock_class_key           *reader_lock_key;
 491
 492        struct mutex                    mutex;
 493
 494        struct ring_buffer_per_cpu      **buffers;
 495
 496#ifdef CONFIG_HOTPLUG_CPU
 497        struct notifier_block           cpu_notify;
 498#endif
 499        u64                             (*clock)(void);
 500};
 501
 502struct ring_buffer_iter {
 503        struct ring_buffer_per_cpu      *cpu_buffer;
 504        unsigned long                   head;
 505        struct buffer_page              *head_page;
 506        struct buffer_page              *cache_reader_page;
 507        unsigned long                   cache_read;
 508        u64                             read_stamp;
 509};
 510
 511/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 512#define RB_WARN_ON(b, cond)                                             \
 513        ({                                                              \
 514                int _____ret = unlikely(cond);                          \
 515                if (_____ret) {                                         \
 516                        if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 517                                struct ring_buffer_per_cpu *__b =       \
 518                                        (void *)b;                      \
 519                                atomic_inc(&__b->buffer->record_disabled); \
 520                        } else                                          \
 521                                atomic_inc(&b->record_disabled);        \
 522                        WARN_ON(1);                                     \
 523                }                                                       \
 524                _____ret;                                               \
 525        })
 526
 527/* Up this if you want to test the TIME_EXTENTS and normalization */
 528#define DEBUG_SHIFT 0
 529
 530static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 531{
 532        /* shift to debug/test normalization and TIME_EXTENTS */
 533        return buffer->clock() << DEBUG_SHIFT;
 534}
 535
 536u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 537{
 538        u64 time;
 539
 540        preempt_disable_notrace();
 541        time = rb_time_stamp(buffer);
 542        preempt_enable_no_resched_notrace();
 543
 544        return time;
 545}
 546EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 547
 548void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 549                                      int cpu, u64 *ts)
 550{
 551        /* Just stupid testing the normalize function and deltas */
 552        *ts >>= DEBUG_SHIFT;
 553}
 554EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 555
 556/*
 557 * Making the ring buffer lockless makes things tricky.
  558 * Writes only happen on the CPU that they are on, so they
  559 * only need to worry about interrupts. Reads, however, can
  560 * happen on any CPU.
 561 *
 562 * The reader page is always off the ring buffer, but when the
 563 * reader finishes with a page, it needs to swap its page with
 564 * a new one from the buffer. The reader needs to take from
 565 * the head (writes go to the tail). But if a writer is in overwrite
 566 * mode and wraps, it must push the head page forward.
 567 *
 568 * Here lies the problem.
 569 *
 570 * The reader must be careful to replace only the head page, and
 571 * not another one. As described at the top of the file in the
 572 * ASCII art, the reader sets its old page to point to the next
 573 * page after head. It then sets the page after head to point to
 574 * the old reader page. But if the writer moves the head page
 575 * during this operation, the reader could end up with the tail.
 576 *
 577 * We use cmpxchg to help prevent this race. We also do something
 578 * special with the page before head. We set the LSB to 1.
 579 *
 580 * When the writer must push the page forward, it will clear the
 581 * bit that points to the head page, move the head, and then set
 582 * the bit that points to the new head page.
 583 *
 584 * We also don't want an interrupt coming in and moving the head
 585 * page on another writer. Thus we use the second LSB to catch
 586 * that too. Thus:
 587 *
 588 * head->list->prev->next        bit 1          bit 0
 589 *                              -------        -------
 590 * Normal page                     0              0
 591 * Points to head page             0              1
 592 * New head page                   1              0
 593 *
 594 * Note we can not trust the prev pointer of the head page, because:
 595 *
 596 * +----+       +-----+        +-----+
 597 * |    |------>|  T  |---X--->|  N  |
 598 * |    |<------|     |        |     |
 599 * +----+       +-----+        +-----+
 600 *   ^                           ^ |
 601 *   |          +-----+          | |
 602 *   +----------|  R  |----------+ |
 603 *              |     |<-----------+
 604 *              +-----+
 605 *
 606 * Key:  ---X-->  HEAD flag set in pointer
 607 *         T      Tail page
 608 *         R      Reader page
 609 *         N      Next page
 610 *
 611 * (see __rb_reserve_next() to see where this happens)
 612 *
 613 *  What the above shows is that the reader just swapped out
 614 *  the reader page with a page in the buffer, but before it
 615 *  could make the new header point back to the new page added
 616 *  it was preempted by a writer. The writer moved forward onto
 617 *  the new page added by the reader and is about to move forward
 618 *  again.
 619 *
  620 *  You can see that it is legitimate for the previous pointer of
  621 *  the head (or any page) not to point back to itself. But this is
  622 *  only temporary.
 623 */
 624
 625#define RB_PAGE_NORMAL          0UL
 626#define RB_PAGE_HEAD            1UL
 627#define RB_PAGE_UPDATE          2UL
 628
 629
 630#define RB_FLAG_MASK            3UL
 631
 632/* PAGE_MOVED is not part of the mask */
 633#define RB_PAGE_MOVED           4UL
 634
 635/*
 636 * rb_list_head - remove any bit
 637 */
 638static struct list_head *rb_list_head(struct list_head *list)
 639{
 640        unsigned long val = (unsigned long)list;
 641
 642        return (struct list_head *)(val & ~RB_FLAG_MASK);
 643}
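
/*
 * A small usage sketch (the addresses and local variables are purely
 * illustrative): a link that points to the head page is the page address
 * with bit 0 set, and rb_list_head() strips the flag bits off again.
 */
#if 0
        struct list_head *next;
        unsigned long val = (unsigned long)&bpage->list | RB_PAGE_HEAD;

        next = (struct list_head *)val;         /* tagged pointer, bit 0 set */
        next = rb_list_head(next);              /* back to &bpage->list */
#endif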
 644
 645/*
 646 * rb_is_head_page - test if the given page is the head page
 647 *
  648 * Because the reader may move the head_page pointer, we cannot
  649 * trust what the head page is (it may be pointing to
  650 * the reader page). But if the next page is the head page,
  651 * the pointer to it will have non-zero flag bits.
 652 */
 653static inline int
 654rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 655                struct buffer_page *page, struct list_head *list)
 656{
 657        unsigned long val;
 658
 659        val = (unsigned long)list->next;
 660
 661        if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 662                return RB_PAGE_MOVED;
 663
 664        return val & RB_FLAG_MASK;
 665}
 666
 667/*
 668 * rb_is_reader_page
 669 *
  670 * The unique thing about the reader page is that, if the
 671 * writer is ever on it, the previous pointer never points
 672 * back to the reader page.
 673 */
 674static int rb_is_reader_page(struct buffer_page *page)
 675{
 676        struct list_head *list = page->list.prev;
 677
 678        return rb_list_head(list->next) != &page->list;
 679}
 680
 681/*
 682 * rb_set_list_to_head - set a list_head to be pointing to head.
 683 */
 684static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 685                                struct list_head *list)
 686{
 687        unsigned long *ptr;
 688
 689        ptr = (unsigned long *)&list->next;
 690        *ptr |= RB_PAGE_HEAD;
 691        *ptr &= ~RB_PAGE_UPDATE;
 692}
 693
 694/*
 695 * rb_head_page_activate - sets up head page
 696 */
 697static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 698{
 699        struct buffer_page *head;
 700
 701        head = cpu_buffer->head_page;
 702        if (!head)
 703                return;
 704
 705        /*
 706         * Set the previous list pointer to have the HEAD flag.
 707         */
 708        rb_set_list_to_head(cpu_buffer, head->list.prev);
 709}
 710
 711static void rb_list_head_clear(struct list_head *list)
 712{
 713        unsigned long *ptr = (unsigned long *)&list->next;
 714
 715        *ptr &= ~RB_FLAG_MASK;
 716}
 717
 718/*
  719 * rb_head_page_deactivate - clears head page ptr (for free list)
 720 */
 721static void
 722rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 723{
 724        struct list_head *hd;
 725
 726        /* Go through the whole list and clear any pointers found. */
 727        rb_list_head_clear(cpu_buffer->pages);
 728
 729        list_for_each(hd, cpu_buffer->pages)
 730                rb_list_head_clear(hd);
 731}
 732
 733static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 734                            struct buffer_page *head,
 735                            struct buffer_page *prev,
 736                            int old_flag, int new_flag)
 737{
 738        struct list_head *list;
 739        unsigned long val = (unsigned long)&head->list;
 740        unsigned long ret;
 741
 742        list = &prev->list;
 743
 744        val &= ~RB_FLAG_MASK;
 745
 746        ret = cmpxchg((unsigned long *)&list->next,
 747                      val | old_flag, val | new_flag);
 748
 749        /* check if the reader took the page */
 750        if ((ret & ~RB_FLAG_MASK) != val)
 751                return RB_PAGE_MOVED;
 752
 753        return ret & RB_FLAG_MASK;
 754}
 755
 756static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 757                                   struct buffer_page *head,
 758                                   struct buffer_page *prev,
 759                                   int old_flag)
 760{
 761        return rb_head_page_set(cpu_buffer, head, prev,
 762                                old_flag, RB_PAGE_UPDATE);
 763}
 764
 765static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 766                                 struct buffer_page *head,
 767                                 struct buffer_page *prev,
 768                                 int old_flag)
 769{
 770        return rb_head_page_set(cpu_buffer, head, prev,
 771                                old_flag, RB_PAGE_HEAD);
 772}
 773
 774static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 775                                   struct buffer_page *head,
 776                                   struct buffer_page *prev,
 777                                   int old_flag)
 778{
 779        return rb_head_page_set(cpu_buffer, head, prev,
 780                                old_flag, RB_PAGE_NORMAL);
 781}
 782
 783static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 784                               struct buffer_page **bpage)
 785{
 786        struct list_head *p = rb_list_head((*bpage)->list.next);
 787
 788        *bpage = list_entry(p, struct buffer_page, list);
 789}
 790
 791static struct buffer_page *
 792rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 793{
 794        struct buffer_page *head;
 795        struct buffer_page *page;
 796        struct list_head *list;
 797        int i;
 798
 799        if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 800                return NULL;
 801
 802        /* sanity check */
 803        list = cpu_buffer->pages;
 804        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 805                return NULL;
 806
 807        page = head = cpu_buffer->head_page;
 808        /*
  809         * It is possible that the writer moves the head page behind
  810         * where we started, and we miss it in one loop.
  811         * A second loop should grab the head page, but we'll do
  812         * three loops just because I'm paranoid.
 813         */
 814        for (i = 0; i < 3; i++) {
 815                do {
 816                        if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
 817                                cpu_buffer->head_page = page;
 818                                return page;
 819                        }
 820                        rb_inc_page(cpu_buffer, &page);
 821                } while (page != head);
 822        }
 823
 824        RB_WARN_ON(cpu_buffer, 1);
 825
 826        return NULL;
 827}
 828
 829static int rb_head_page_replace(struct buffer_page *old,
 830                                struct buffer_page *new)
 831{
 832        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
 833        unsigned long val;
 834        unsigned long ret;
 835
 836        val = *ptr & ~RB_FLAG_MASK;
 837        val |= RB_PAGE_HEAD;
 838
 839        ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 840
 841        return ret == val;
 842}
 843
 844/*
 845 * rb_tail_page_update - move the tail page forward
 846 *
  847 * Returns 1 if we moved the tail page, 0 if someone else did.
 848 */
 849static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 850                               struct buffer_page *tail_page,
 851                               struct buffer_page *next_page)
 852{
 853        struct buffer_page *old_tail;
 854        unsigned long old_entries;
 855        unsigned long old_write;
 856        int ret = 0;
 857
 858        /*
 859         * The tail page now needs to be moved forward.
 860         *
 861         * We need to reset the tail page, but without messing
 862         * with possible erasing of data brought in by interrupts
 863         * that have moved the tail page and are currently on it.
 864         *
 865         * We add a counter to the write field to denote this.
 866         */
 867        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 868        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 869
 870        /*
 871         * Just make sure we have seen our old_write and synchronize
 872         * with any interrupts that come in.
 873         */
 874        barrier();
 875
 876        /*
 877         * If the tail page is still the same as what we think
 878         * it is, then it is up to us to update the tail
 879         * pointer.
 880         */
 881        if (tail_page == cpu_buffer->tail_page) {
 882                /* Zero the write counter */
 883                unsigned long val = old_write & ~RB_WRITE_MASK;
 884                unsigned long eval = old_entries & ~RB_WRITE_MASK;
 885
 886                /*
 887                 * This will only succeed if an interrupt did
 888                 * not come in and change it. In which case, we
 889                 * do not want to modify it.
 890                 *
 891                 * We add (void) to let the compiler know that we do not care
 892                 * about the return value of these functions. We use the
 893                 * cmpxchg to only update if an interrupt did not already
 894                 * do it for us. If the cmpxchg fails, we don't care.
 895                 */
 896                (void)local_cmpxchg(&next_page->write, old_write, val);
 897                (void)local_cmpxchg(&next_page->entries, old_entries, eval);
 898
 899                /*
 900                 * No need to worry about races with clearing out the commit.
  901                 * It can only increment when a commit takes place. But that
  902                 * only happens in the outermost nested commit.
 903                 */
 904                local_set(&next_page->page->commit, 0);
 905
 906                old_tail = cmpxchg(&cpu_buffer->tail_page,
 907                                   tail_page, next_page);
 908
 909                if (old_tail == tail_page)
 910                        ret = 1;
 911        }
 912
 913        return ret;
 914}
 915
 916static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 917                          struct buffer_page *bpage)
 918{
 919        unsigned long val = (unsigned long)bpage;
 920
 921        if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
 922                return 1;
 923
 924        return 0;
 925}
 926
 927/**
  928 * rb_check_list - make sure a pointer to a list has the flag bits zero
 929 */
 930static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
 931                         struct list_head *list)
 932{
 933        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
 934                return 1;
 935        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
 936                return 1;
 937        return 0;
 938}
 939
 940/**
  941 * rb_check_pages - integrity check of buffer pages
 942 * @cpu_buffer: CPU buffer with pages to test
 943 *
 944 * As a safety measure we check to make sure the data pages have not
 945 * been corrupted.
 946 */
 947static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 948{
 949        struct list_head *head = cpu_buffer->pages;
 950        struct buffer_page *bpage, *tmp;
 951
 952        /* Reset the head page if it exists */
 953        if (cpu_buffer->head_page)
 954                rb_set_head_page(cpu_buffer);
 955
 956        rb_head_page_deactivate(cpu_buffer);
 957
 958        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 959                return -1;
 960        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
 961                return -1;
 962
 963        if (rb_check_list(cpu_buffer, head))
 964                return -1;
 965
 966        list_for_each_entry_safe(bpage, tmp, head, list) {
 967                if (RB_WARN_ON(cpu_buffer,
 968                               bpage->list.next->prev != &bpage->list))
 969                        return -1;
 970                if (RB_WARN_ON(cpu_buffer,
 971                               bpage->list.prev->next != &bpage->list))
 972                        return -1;
 973                if (rb_check_list(cpu_buffer, &bpage->list))
 974                        return -1;
 975        }
 976
 977        rb_head_page_activate(cpu_buffer);
 978
 979        return 0;
 980}
 981
 982static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
 983{
 984        int i;
 985        struct buffer_page *bpage, *tmp;
 986
 987        for (i = 0; i < nr_pages; i++) {
 988                struct page *page;
 989                /*
  990                 * The __GFP_NORETRY flag makes sure that the allocation fails
  991                 * gracefully, without invoking the OOM killer or destabilizing
  992                 * the system.
 993                 */
 994                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 995                                    GFP_KERNEL | __GFP_NORETRY,
 996                                    cpu_to_node(cpu));
 997                if (!bpage)
 998                        goto free_pages;
 999
1000                list_add(&bpage->list, pages);
1001
1002                page = alloc_pages_node(cpu_to_node(cpu),
1003                                        GFP_KERNEL | __GFP_NORETRY, 0);
1004                if (!page)
1005                        goto free_pages;
1006                bpage->page = page_address(page);
1007                rb_init_page(bpage->page);
1008        }
1009
1010        return 0;
1011
1012free_pages:
1013        list_for_each_entry_safe(bpage, tmp, pages, list) {
1014                list_del_init(&bpage->list);
1015                free_buffer_page(bpage);
1016        }
1017
1018        return -ENOMEM;
1019}
1020
1021static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1022                             unsigned nr_pages)
1023{
1024        LIST_HEAD(pages);
1025
1026        WARN_ON(!nr_pages);
1027
1028        if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1029                return -ENOMEM;
1030
1031        /*
1032         * The ring buffer page list is a circular list that does not
1033         * start and end with a list head. All page list items point to
1034         * other pages.
1035         */
1036        cpu_buffer->pages = pages.next;
1037        list_del(&pages);
1038
1039        cpu_buffer->nr_pages = nr_pages;
1040
1041        rb_check_pages(cpu_buffer);
1042
1043        return 0;
1044}
1045
1046static struct ring_buffer_per_cpu *
1047rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1048{
1049        struct ring_buffer_per_cpu *cpu_buffer;
1050        struct buffer_page *bpage;
1051        struct page *page;
1052        int ret;
1053
1054        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1055                                  GFP_KERNEL, cpu_to_node(cpu));
1056        if (!cpu_buffer)
1057                return NULL;
1058
1059        cpu_buffer->cpu = cpu;
1060        cpu_buffer->buffer = buffer;
1061        raw_spin_lock_init(&cpu_buffer->reader_lock);
1062        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1063        cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1064        INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1065        init_completion(&cpu_buffer->update_done);
1066
1067        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1068                            GFP_KERNEL, cpu_to_node(cpu));
1069        if (!bpage)
1070                goto fail_free_buffer;
1071
1072        rb_check_bpage(cpu_buffer, bpage);
1073
1074        cpu_buffer->reader_page = bpage;
1075        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1076        if (!page)
1077                goto fail_free_reader;
1078        bpage->page = page_address(page);
1079        rb_init_page(bpage->page);
1080
1081        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1082        INIT_LIST_HEAD(&cpu_buffer->new_pages);
1083
1084        ret = rb_allocate_pages(cpu_buffer, nr_pages);
1085        if (ret < 0)
1086                goto fail_free_reader;
1087
1088        cpu_buffer->head_page
1089                = list_entry(cpu_buffer->pages, struct buffer_page, list);
1090        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1091
1092        rb_head_page_activate(cpu_buffer);
1093
1094        return cpu_buffer;
1095
1096 fail_free_reader:
1097        free_buffer_page(cpu_buffer->reader_page);
1098
1099 fail_free_buffer:
1100        kfree(cpu_buffer);
1101        return NULL;
1102}
1103
1104static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1105{
1106        struct list_head *head = cpu_buffer->pages;
1107        struct buffer_page *bpage, *tmp;
1108
1109        free_buffer_page(cpu_buffer->reader_page);
1110
1111        rb_head_page_deactivate(cpu_buffer);
1112
1113        if (head) {
1114                list_for_each_entry_safe(bpage, tmp, head, list) {
1115                        list_del_init(&bpage->list);
1116                        free_buffer_page(bpage);
1117                }
1118                bpage = list_entry(head, struct buffer_page, list);
1119                free_buffer_page(bpage);
1120        }
1121
1122        kfree(cpu_buffer);
1123}
1124
1125#ifdef CONFIG_HOTPLUG_CPU
1126static int rb_cpu_notify(struct notifier_block *self,
1127                         unsigned long action, void *hcpu);
1128#endif
1129
1130/**
1131 * ring_buffer_alloc - allocate a new ring_buffer
1132 * @size: the size in bytes per cpu that is needed.
1133 * @flags: attributes to set for the ring buffer.
1134 *
1135 * Currently the only flag that is available is the RB_FL_OVERWRITE
1136 * flag. This flag means that the buffer will overwrite old data
1137 * when the buffer wraps. If this flag is not set, the buffer will
1138 * drop data when the tail hits the head.
1139 */
1140struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1141                                        struct lock_class_key *key)
1142{
1143        struct ring_buffer *buffer;
1144        int bsize;
1145        int cpu, nr_pages;
1146
1147        /* keep it in its own cache line */
1148        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1149                         GFP_KERNEL);
1150        if (!buffer)
1151                return NULL;
1152
1153        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1154                goto fail_free_buffer;
1155
1156        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1157        buffer->flags = flags;
1158        buffer->clock = trace_clock_local;
1159        buffer->reader_lock_key = key;
1160
1161        /* need at least two pages */
1162        if (nr_pages < 2)
1163                nr_pages = 2;
1164
1165        /*
 1166         * Without CPU hotplug, if the ring buffer is allocated from an
 1167         * early initcall, it will not be notified of secondary cpus.
 1168         * In that case, we need to allocate for all possible cpus.
1169         */
1170#ifdef CONFIG_HOTPLUG_CPU
1171        get_online_cpus();
1172        cpumask_copy(buffer->cpumask, cpu_online_mask);
1173#else
1174        cpumask_copy(buffer->cpumask, cpu_possible_mask);
1175#endif
1176        buffer->cpus = nr_cpu_ids;
1177
1178        bsize = sizeof(void *) * nr_cpu_ids;
1179        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1180                                  GFP_KERNEL);
1181        if (!buffer->buffers)
1182                goto fail_free_cpumask;
1183
1184        for_each_buffer_cpu(buffer, cpu) {
1185                buffer->buffers[cpu] =
1186                        rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1187                if (!buffer->buffers[cpu])
1188                        goto fail_free_buffers;
1189        }
1190
1191#ifdef CONFIG_HOTPLUG_CPU
1192        buffer->cpu_notify.notifier_call = rb_cpu_notify;
1193        buffer->cpu_notify.priority = 0;
1194        register_cpu_notifier(&buffer->cpu_notify);
1195#endif
1196
1197        put_online_cpus();
1198        mutex_init(&buffer->mutex);
1199
1200        return buffer;
1201
1202 fail_free_buffers:
1203        for_each_buffer_cpu(buffer, cpu) {
1204                if (buffer->buffers[cpu])
1205                        rb_free_cpu_buffer(buffer->buffers[cpu]);
1206        }
1207        kfree(buffer->buffers);
1208
1209 fail_free_cpumask:
1210        free_cpumask_var(buffer->cpumask);
1211        put_online_cpus();
1212
1213 fail_free_buffer:
1214        kfree(buffer);
1215        return NULL;
1216}
1217EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
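
/*
 * A minimal usage sketch from a hypothetical caller.  The ring_buffer_alloc()
 * wrapper in <linux/ring_buffer.h> supplies the lock class key and ends up
 * here in __ring_buffer_alloc():
 */
#if 0
        struct ring_buffer *rb;

        rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);  /* ~1MB per cpu */
        if (!rb)
                return -ENOMEM;

        /* ... write to and read from the buffer ... */

        ring_buffer_free(rb);
#endif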
1218
1219/**
1220 * ring_buffer_free - free a ring buffer.
1221 * @buffer: the buffer to free.
1222 */
1223void
1224ring_buffer_free(struct ring_buffer *buffer)
1225{
1226        int cpu;
1227
1228        get_online_cpus();
1229
1230#ifdef CONFIG_HOTPLUG_CPU
1231        unregister_cpu_notifier(&buffer->cpu_notify);
1232#endif
1233
1234        for_each_buffer_cpu(buffer, cpu)
1235                rb_free_cpu_buffer(buffer->buffers[cpu]);
1236
1237        put_online_cpus();
1238
1239        kfree(buffer->buffers);
1240        free_cpumask_var(buffer->cpumask);
1241
1242        kfree(buffer);
1243}
1244EXPORT_SYMBOL_GPL(ring_buffer_free);
1245
1246void ring_buffer_set_clock(struct ring_buffer *buffer,
1247                           u64 (*clock)(void))
1248{
1249        buffer->clock = clock;
1250}
1251
1252static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1253
1254static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1255{
1256        return local_read(&bpage->entries) & RB_WRITE_MASK;
1257}
1258
1259static inline unsigned long rb_page_write(struct buffer_page *bpage)
1260{
1261        return local_read(&bpage->write) & RB_WRITE_MASK;
1262}
1263
1264static int
1265rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1266{
1267        struct list_head *tail_page, *to_remove, *next_page;
1268        struct buffer_page *to_remove_page, *tmp_iter_page;
1269        struct buffer_page *last_page, *first_page;
1270        unsigned int nr_removed;
1271        unsigned long head_bit;
1272        int page_entries;
1273
1274        head_bit = 0;
1275
1276        raw_spin_lock_irq(&cpu_buffer->reader_lock);
1277        atomic_inc(&cpu_buffer->record_disabled);
1278        /*
1279         * We don't race with the readers since we have acquired the reader
1280         * lock. We also don't race with writers after disabling recording.
1281         * This makes it easy to figure out the first and the last page to be
1282         * removed from the list. We unlink all the pages in between including
1283         * the first and last pages. This is done in a busy loop so that we
1284         * lose the least number of traces.
1285         * The pages are freed after we restart recording and unlock readers.
1286         */
1287        tail_page = &cpu_buffer->tail_page->list;
1288
1289        /*
 1290         * The tail page might be on the reader page; in that case we
 1291         * remove the next page from the ring buffer instead.
1292         */
1293        if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1294                tail_page = rb_list_head(tail_page->next);
1295        to_remove = tail_page;
1296
1297        /* start of pages to remove */
1298        first_page = list_entry(rb_list_head(to_remove->next),
1299                                struct buffer_page, list);
1300
1301        for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1302                to_remove = rb_list_head(to_remove)->next;
1303                head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1304        }
1305
1306        next_page = rb_list_head(to_remove)->next;
1307
1308        /*
1309         * Now we remove all pages between tail_page and next_page.
1310         * Make sure that we have head_bit value preserved for the
1311         * next page
1312         */
1313        tail_page->next = (struct list_head *)((unsigned long)next_page |
1314                                                head_bit);
1315        next_page = rb_list_head(next_page);
1316        next_page->prev = tail_page;
1317
1318        /* make sure pages points to a valid page in the ring buffer */
1319        cpu_buffer->pages = next_page;
1320
1321        /* update head page */
1322        if (head_bit)
1323                cpu_buffer->head_page = list_entry(next_page,
1324                                                struct buffer_page, list);
1325
1326        /*
1327         * change read pointer to make sure any read iterators reset
1328         * themselves
1329         */
1330        cpu_buffer->read = 0;
1331
1332        /* pages are removed, resume tracing and then free the pages */
1333        atomic_dec(&cpu_buffer->record_disabled);
1334        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1335
1336        RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1337
1338        /* last buffer page to remove */
1339        last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1340                                list);
1341        tmp_iter_page = first_page;
1342
1343        do {
1344                to_remove_page = tmp_iter_page;
1345                rb_inc_page(cpu_buffer, &tmp_iter_page);
1346
1347                /* update the counters */
1348                page_entries = rb_page_entries(to_remove_page);
1349                if (page_entries) {
1350                        /*
1351                         * If something was added to this page, it was full
1352                         * since it is not the tail page. So we deduct the
1353                         * bytes consumed in ring buffer from here.
1354                         * Increment overrun to account for the lost events.
1355                         */
1356                        local_add(page_entries, &cpu_buffer->overrun);
1357                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1358                }
1359
1360                /*
1361                 * We have already removed references to this list item, just
1362                 * free up the buffer_page and its page
1363                 */
1364                free_buffer_page(to_remove_page);
1365                nr_removed--;
1366
1367        } while (to_remove_page != last_page);
1368
1369        RB_WARN_ON(cpu_buffer, nr_removed);
1370
1371        return nr_removed == 0;
1372}
1373
1374static int
1375rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1376{
1377        struct list_head *pages = &cpu_buffer->new_pages;
1378        int retries, success;
1379
1380        raw_spin_lock_irq(&cpu_buffer->reader_lock);
1381        /*
1382         * We are holding the reader lock, so the reader page won't be swapped
1383         * in the ring buffer. Now we are racing with the writer trying to
1384         * move head page and the tail page.
1385         * We are going to adapt the reader page update process where:
1386         * 1. We first splice the start and end of list of new pages between
1387         *    the head page and its previous page.
1388         * 2. We cmpxchg the prev_page->next to point from head page to the
1389         *    start of new pages list.
1390         * 3. Finally, we update the head->prev to the end of new list.
1391         *
1392         * We will try this process 10 times, to make sure that we don't keep
1393         * spinning.
1394         */
1395        retries = 10;
1396        success = 0;
1397        while (retries--) {
1398                struct list_head *head_page, *prev_page, *r;
1399                struct list_head *last_page, *first_page;
1400                struct list_head *head_page_with_bit;
1401
1402                head_page = &rb_set_head_page(cpu_buffer)->list;
1403                if (!head_page)
1404                        break;
1405                prev_page = head_page->prev;
1406
1407                first_page = pages->next;
1408                last_page  = pages->prev;
1409
1410                head_page_with_bit = (struct list_head *)
1411                                     ((unsigned long)head_page | RB_PAGE_HEAD);
1412
1413                last_page->next = head_page_with_bit;
1414                first_page->prev = prev_page;
1415
1416                r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1417
1418                if (r == head_page_with_bit) {
1419                        /*
1420                         * yay, we replaced the page pointer to our new list,
1421                         * now, we just have to update to head page's prev
1422                         * pointer to point to end of list
1423                         */
1424                        head_page->prev = last_page;
1425                        success = 1;
1426                        break;
1427                }
1428        }
1429
1430        if (success)
1431                INIT_LIST_HEAD(pages);
1432        /*
1433         * If we weren't successful in adding in new pages, warn and stop
 1434         * If we weren't successful in adding the new pages, warn and stop
1435         */
1436        RB_WARN_ON(cpu_buffer, !success);
1437        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1438
1439        /* free pages if they weren't inserted */
1440        if (!success) {
1441                struct buffer_page *bpage, *tmp;
1442                list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1443                                         list) {
1444                        list_del_init(&bpage->list);
1445                        free_buffer_page(bpage);
1446                }
1447        }
1448        return success;
1449}
1450
1451static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1452{
1453        int success;
1454
1455        if (cpu_buffer->nr_pages_to_update > 0)
1456                success = rb_insert_pages(cpu_buffer);
1457        else
1458                success = rb_remove_pages(cpu_buffer,
1459                                        -cpu_buffer->nr_pages_to_update);
1460
1461        if (success)
1462                cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1463}
1464
1465static void update_pages_handler(struct work_struct *work)
1466{
1467        struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1468                        struct ring_buffer_per_cpu, update_pages_work);
1469        rb_update_pages(cpu_buffer);
1470        complete(&cpu_buffer->update_done);
1471}
1472
1473/**
1474 * ring_buffer_resize - resize the ring buffer
1475 * @buffer: the buffer to resize.
1476 * @size: the new size.
1477 *
1478 * Minimum size is 2 * BUF_PAGE_SIZE.
1479 *
1480 * Returns 0 on success and < 0 on failure.
1481 */
1482int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1483                        int cpu_id)
1484{
1485        struct ring_buffer_per_cpu *cpu_buffer;
1486        unsigned nr_pages;
1487        int cpu, err = 0;
1488
1489        /*
1490         * Always succeed at resizing a non-existent buffer:
1491         */
1492        if (!buffer)
1493                return size;
1494
1495        /* Make sure the requested buffer exists */
1496        if (cpu_id != RING_BUFFER_ALL_CPUS &&
1497            !cpumask_test_cpu(cpu_id, buffer->cpumask))
1498                return size;
1499
1500        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1501        size *= BUF_PAGE_SIZE;
1502
1503        /* we need a minimum of two pages */
1504        if (size < BUF_PAGE_SIZE * 2)
1505                size = BUF_PAGE_SIZE * 2;
1506
1507        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1508
1509        /*
1510         * Don't succeed if resizing is disabled, as a reader might be
1511         * manipulating the ring buffer and is expecting a sane state while
1512         * this is true.
1513         */
1514        if (atomic_read(&buffer->resize_disabled))
1515                return -EBUSY;
1516
1517        /* prevent another thread from changing buffer sizes */
1518        mutex_lock(&buffer->mutex);
1519
1520        if (cpu_id == RING_BUFFER_ALL_CPUS) {
1521                /* calculate the pages to update */
1522                for_each_buffer_cpu(buffer, cpu) {
1523                        cpu_buffer = buffer->buffers[cpu];
1524
1525                        cpu_buffer->nr_pages_to_update = nr_pages -
1526                                                        cpu_buffer->nr_pages;
1527                        /*
 1528                         * nothing more to do when removing pages or when there is no update
1529                         */
1530                        if (cpu_buffer->nr_pages_to_update <= 0)
1531                                continue;
1532                        /*
1533                         * to add pages, make sure all new pages can be
1534                         * allocated without receiving ENOMEM
1535                         */
1536                        INIT_LIST_HEAD(&cpu_buffer->new_pages);
1537                        if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1538                                                &cpu_buffer->new_pages, cpu)) {
1539                                /* not enough memory for new pages */
1540                                err = -ENOMEM;
1541                                goto out_err;
1542                        }
1543                }
1544
1545                get_online_cpus();
1546                /*
1547                 * Fire off all the required work handlers
1548                 * We can't schedule on offline CPUs, but it's not necessary
1549                 * since we can change their buffer sizes without any race.
1550                 */
1551                for_each_buffer_cpu(buffer, cpu) {
1552                        cpu_buffer = buffer->buffers[cpu];
1553                        if (!cpu_buffer->nr_pages_to_update)
1554                                continue;
1555
1556                        if (cpu_online(cpu))
1557                                schedule_work_on(cpu,
1558                                                &cpu_buffer->update_pages_work);
1559                        else
1560                                rb_update_pages(cpu_buffer);
1561                }
1562
1563                /* wait for all the updates to complete */
1564                for_each_buffer_cpu(buffer, cpu) {
1565                        cpu_buffer = buffer->buffers[cpu];
1566                        if (!cpu_buffer->nr_pages_to_update)
1567                                continue;
1568
1569                        if (cpu_online(cpu))
1570                                wait_for_completion(&cpu_buffer->update_done);
1571                        cpu_buffer->nr_pages_to_update = 0;
1572                }
1573
1574                put_online_cpus();
1575        } else {
1576                /* Make sure this CPU has been initialized */
1577                if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1578                        goto out;
1579
1580                cpu_buffer = buffer->buffers[cpu_id];
1581
1582                if (nr_pages == cpu_buffer->nr_pages)
1583                        goto out;
1584
1585                cpu_buffer->nr_pages_to_update = nr_pages -
1586                                                cpu_buffer->nr_pages;
1587
1588                INIT_LIST_HEAD(&cpu_buffer->new_pages);
1589                if (cpu_buffer->nr_pages_to_update > 0 &&
1590                        __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1591                                            &cpu_buffer->new_pages, cpu_id)) {
1592                        err = -ENOMEM;
1593                        goto out_err;
1594                }
1595
1596                get_online_cpus();
1597
1598                if (cpu_online(cpu_id)) {
1599                        schedule_work_on(cpu_id,
1600                                         &cpu_buffer->update_pages_work);
1601                        wait_for_completion(&cpu_buffer->update_done);
1602                } else
1603                        rb_update_pages(cpu_buffer);
1604
1605                cpu_buffer->nr_pages_to_update = 0;
1606                put_online_cpus();
1607        }
1608
1609 out:
1610        /*
1611         * The ring buffer resize can happen with the ring buffer
1612         * enabled, so that the update disturbs the tracing as little
1613         * as possible. But if the buffer is disabled, we do not need
1614         * to worry about that, and we can take the time to verify
1615         * that the buffer is not corrupt.
1616         */
1617        if (atomic_read(&buffer->record_disabled)) {
1618                atomic_inc(&buffer->record_disabled);
1619                /*
1620                 * Even though the buffer was disabled, we must make sure
1621                 * that it is truly disabled before calling rb_check_pages.
1622                 * There could have been a race between checking
1623                 * record_disable and incrementing it.
1624                 */
1625                synchronize_sched();
1626                for_each_buffer_cpu(buffer, cpu) {
1627                        cpu_buffer = buffer->buffers[cpu];
1628                        rb_check_pages(cpu_buffer);
1629                }
1630                atomic_dec(&buffer->record_disabled);
1631        }
1632
1633        mutex_unlock(&buffer->mutex);
1634        return size;
1635
1636 out_err:
1637        for_each_buffer_cpu(buffer, cpu) {
1638                struct buffer_page *bpage, *tmp;
1639
1640                cpu_buffer = buffer->buffers[cpu];
1641                cpu_buffer->nr_pages_to_update = 0;
1642
1643                if (list_empty(&cpu_buffer->new_pages))
1644                        continue;
1645
1646                list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1647                                        list) {
1648                        list_del_init(&bpage->list);
1649                        free_buffer_page(bpage);
1650                }
1651        }
1652        mutex_unlock(&buffer->mutex);
1653        return err;
1654}
1655EXPORT_SYMBOL_GPL(ring_buffer_resize);
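
/*
 * Example (illustrative sketch): a caller that wants roughly one
 * megabyte per CPU can ask for that many bytes and let the resize
 * code round the request up to whole pages. The error handling here
 * is only a sketch.
 *
 *        int ret;
 *
 *        ret = ring_buffer_resize(buffer, 1024 * 1024, RING_BUFFER_ALL_CPUS);
 *        if (ret < 0)
 *                pr_warn("ring buffer resize failed: %d\n", ret);
 */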
1656
1657void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1658{
1659        mutex_lock(&buffer->mutex);
1660        if (val)
1661                buffer->flags |= RB_FL_OVERWRITE;
1662        else
1663                buffer->flags &= ~RB_FL_OVERWRITE;
1664        mutex_unlock(&buffer->mutex);
1665}
1666EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1667
1668static inline void *
1669__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1670{
1671        return bpage->data + index;
1672}
1673
1674static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1675{
1676        return bpage->page->data + index;
1677}
1678
1679static inline struct ring_buffer_event *
1680rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1681{
1682        return __rb_page_index(cpu_buffer->reader_page,
1683                               cpu_buffer->reader_page->read);
1684}
1685
1686static inline struct ring_buffer_event *
1687rb_iter_head_event(struct ring_buffer_iter *iter)
1688{
1689        return __rb_page_index(iter->head_page, iter->head);
1690}
1691
1692static inline unsigned rb_page_commit(struct buffer_page *bpage)
1693{
1694        return local_read(&bpage->page->commit);
1695}
1696
1697/* Size is determined by what has been committed */
1698static inline unsigned rb_page_size(struct buffer_page *bpage)
1699{
1700        return rb_page_commit(bpage);
1701}
1702
1703static inline unsigned
1704rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1705{
1706        return rb_page_commit(cpu_buffer->commit_page);
1707}
1708
1709static inline unsigned
1710rb_event_index(struct ring_buffer_event *event)
1711{
1712        unsigned long addr = (unsigned long)event;
1713
1714        return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1715}
1716
1717static inline int
1718rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1719                   struct ring_buffer_event *event)
1720{
1721        unsigned long addr = (unsigned long)event;
1722        unsigned long index;
1723
1724        index = rb_event_index(event);
1725        addr &= PAGE_MASK;
1726
1727        return cpu_buffer->commit_page->page == (void *)addr &&
1728                rb_commit_index(cpu_buffer) == index;
1729}
1730
1731static void
1732rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1733{
1734        unsigned long max_count;
1735
1736        /*
1737         * We only race with interrupts and NMIs on this CPU.
1738         * If we own the commit event, then we can commit
1739         * all others that interrupted us, since the interruptions
1740         * are in stack format (they finish before they come
1741         * back to us). This allows us to do a simple loop to
1742         * assign the commit to the tail.
1743         */
1744 again:
1745        max_count = cpu_buffer->nr_pages * 100;
1746
1747        while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1748                if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1749                        return;
1750                if (RB_WARN_ON(cpu_buffer,
1751                               rb_is_reader_page(cpu_buffer->tail_page)))
1752                        return;
1753                local_set(&cpu_buffer->commit_page->page->commit,
1754                          rb_page_write(cpu_buffer->commit_page));
1755                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1756                cpu_buffer->write_stamp =
1757                        cpu_buffer->commit_page->page->time_stamp;
1758                /* add barrier to keep gcc from optimizing too much */
1759                barrier();
1760        }
1761        while (rb_commit_index(cpu_buffer) !=
1762               rb_page_write(cpu_buffer->commit_page)) {
1763
1764                local_set(&cpu_buffer->commit_page->page->commit,
1765                          rb_page_write(cpu_buffer->commit_page));
1766                RB_WARN_ON(cpu_buffer,
1767                           local_read(&cpu_buffer->commit_page->page->commit) &
1768                           ~RB_WRITE_MASK);
1769                barrier();
1770        }
1771
1772        /* again, keep gcc from optimizing */
1773        barrier();
1774
1775        /*
1776         * If an interrupt came in just after the first while loop
1777         * and pushed the tail page forward, we will be left with
1778         * a dangling commit that will never go forward.
1779         */
1780        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1781                goto again;
1782}
1783
1784static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1785{
1786        cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1787        cpu_buffer->reader_page->read = 0;
1788}
1789
1790static void rb_inc_iter(struct ring_buffer_iter *iter)
1791{
1792        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1793
1794        /*
1795         * The iterator could be on the reader page (it starts there).
1796         * But the head could have moved, since the reader was
1797         * found. Check for this case and assign the iterator
1798         * to the head page instead of next.
1799         */
1800        if (iter->head_page == cpu_buffer->reader_page)
1801                iter->head_page = rb_set_head_page(cpu_buffer);
1802        else
1803                rb_inc_page(cpu_buffer, &iter->head_page);
1804
1805        iter->read_stamp = iter->head_page->page->time_stamp;
1806        iter->head = 0;
1807}
1808
1809/* Slow path, do not inline */
1810static noinline struct ring_buffer_event *
1811rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1812{
1813        event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1814
1815        /* Not the first event on the page? */
1816        if (rb_event_index(event)) {
1817                event->time_delta = delta & TS_MASK;
1818                event->array[0] = delta >> TS_SHIFT;
1819        } else {
1820                /* nope, just zero it */
1821                event->time_delta = 0;
1822                event->array[0] = 0;
1823        }
1824
1825        return skip_time_extend(event);
1826}
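
/*
 * Illustrative note: the split above is undone when the write stamp
 * is updated; rb_update_write_stamp() rebuilds the full delta as
 *
 *        delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 *
 * so array[0] carries the bits that do not fit in time_delta.
 */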
1827
1828/**
1829 * rb_update_event - update event type and data
1830 * @cpu_buffer: the per-CPU buffer the event belongs to
1831 * @event: the event to update
1832 * @length: the size of the event field in the ring buffer
1833 *
1834 * Update the type and data fields of the event. The length
1835 * is the actual size that is written to the ring buffer,
1836 * and with this, we can determine what to place into the
1837 * data field.
1838 */
1839static void
1840rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1841                struct ring_buffer_event *event, unsigned length,
1842                int add_timestamp, u64 delta)
1843{
1844        /* Only a commit updates the timestamp */
1845        if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1846                delta = 0;
1847
1848        /*
1849         * If we need to add a timestamp, then we
1850         * add it to the start of the reserved space.
1851         */
1852        if (unlikely(add_timestamp)) {
1853                event = rb_add_time_stamp(event, delta);
1854                length -= RB_LEN_TIME_EXTEND;
1855                delta = 0;
1856        }
1857
1858        event->time_delta = delta;
1859        length -= RB_EVNT_HDR_SIZE;
1860        if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1861                event->type_len = 0;
1862                event->array[0] = length;
1863        } else
1864                event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1865}
1866
1867/*
1868 * rb_handle_head_page - writer hit the head page
1869 *
1870 * Returns: +1 to retry page
1871 *           0 to continue
1872 *          -1 on error
1873 */
1874static int
1875rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1876                    struct buffer_page *tail_page,
1877                    struct buffer_page *next_page)
1878{
1879        struct buffer_page *new_head;
1880        int entries;
1881        int type;
1882        int ret;
1883
1884        entries = rb_page_entries(next_page);
1885
1886        /*
1887         * The hard part is here. We need to move the head
1888         * forward, and protect against both readers on
1889         * other CPUs and writers coming in via interrupts.
1890         */
1891        type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1892                                       RB_PAGE_HEAD);
1893
1894        /*
1895         * type can be one of four:
1896         *  NORMAL - an interrupt already moved it for us
1897         *  HEAD   - we are the first to get here.
1898         *  UPDATE - we are the interrupt interrupting
1899         *           a current move.
1900         *  MOVED  - a reader on another CPU moved the next
1901         *           pointer to its reader page. Give up
1902         *           and try again.
1903         */
1904
1905        switch (type) {
1906        case RB_PAGE_HEAD:
1907                /*
1908                 * We changed the head to UPDATE, thus
1909                 * it is our responsibility to update
1910                 * the counters.
1911                 */
1912                local_add(entries, &cpu_buffer->overrun);
1913                local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1914
1915                /*
1916                 * The entries will be zeroed out when we move the
1917                 * tail page.
1918                 */
1919
1920                /* still more to do */
1921                break;
1922
1923        case RB_PAGE_UPDATE:
1924                /*
1925                 * This is an interrupt that interrupted the
1926                 * previous update. Still more to do.
1927                 */
1928                break;
1929        case RB_PAGE_NORMAL:
1930                /*
1931                 * An interrupt came in before the update
1932                 * and processed this for us.
1933                 * Nothing left to do.
1934                 */
1935                return 1;
1936        case RB_PAGE_MOVED:
1937                /*
1938                 * The reader is on another CPU and just did
1939                 * a swap with our next_page.
1940                 * Try again.
1941                 */
1942                return 1;
1943        default:
1944                RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1945                return -1;
1946        }
1947
1948        /*
1949         * Now that we are here, the old head pointer is
1950         * set to UPDATE. This will keep the reader from
1951         * swapping the head page with the reader page.
1952         * The reader (on another CPU) will spin till
1953         * we are finished.
1954         *
1955         * We just need to protect against interrupts
1956         * doing the job. We will set the next pointer
1957         * to HEAD. After that, we set the old pointer
1958         * to NORMAL, but only if it was HEAD before;
1959         * otherwise we are an interrupt, and only
1960         * want the outer most commit to reset it.
1961         */
1962        new_head = next_page;
1963        rb_inc_page(cpu_buffer, &new_head);
1964
1965        ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1966                                    RB_PAGE_NORMAL);
1967
1968        /*
1969         * Valid returns are:
1970         *  HEAD   - an interrupt came in and already set it.
1971         *  NORMAL - One of two things:
1972         *            1) We really set it.
1973         *            2) A bunch of interrupts came in and moved
1974         *               the page forward again.
1975         */
1976        switch (ret) {
1977        case RB_PAGE_HEAD:
1978        case RB_PAGE_NORMAL:
1979                /* OK */
1980                break;
1981        default:
1982                RB_WARN_ON(cpu_buffer, 1);
1983                return -1;
1984        }
1985
1986        /*
1987         * It is possible that an interrupt came in,
1988         * set the head up, then more interrupts came in
1989         * and moved it again. When we get back here,
1990         * the page would have been set to NORMAL but we
1991         * just set it back to HEAD.
1992         *
1993         * How do you detect this? Well, if that happened
1994         * the tail page would have moved.
1995         */
1996        if (ret == RB_PAGE_NORMAL) {
1997                /*
1998                 * If the tail had moved past next, then we need
1999                 * to reset the pointer.
2000                 */
2001                if (cpu_buffer->tail_page != tail_page &&
2002                    cpu_buffer->tail_page != next_page)
2003                        rb_head_page_set_normal(cpu_buffer, new_head,
2004                                                next_page,
2005                                                RB_PAGE_HEAD);
2006        }
2007
2008        /*
2009         * If this was the outer most commit (the one that
2010         * changed the original pointer from HEAD to UPDATE),
2011         * then it is up to us to reset it to NORMAL.
2012         */
2013        if (type == RB_PAGE_HEAD) {
2014                ret = rb_head_page_set_normal(cpu_buffer, next_page,
2015                                              tail_page,
2016                                              RB_PAGE_UPDATE);
2017                if (RB_WARN_ON(cpu_buffer,
2018                               ret != RB_PAGE_UPDATE))
2019                        return -1;
2020        }
2021
2022        return 0;
2023}
2024
2025static unsigned rb_calculate_event_length(unsigned length)
2026{
2027        struct ring_buffer_event event; /* Used only for sizeof array */
2028
2029        /* zero length can cause confusion */
2030        if (!length)
2031                length = 1;
2032
2033        if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2034                length += sizeof(event.array[0]);
2035
2036        length += RB_EVNT_HDR_SIZE;
2037        length = ALIGN(length, RB_ARCH_ALIGNMENT);
2038
2039        return length;
2040}
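
/*
 * Worked example (illustrative, assuming RB_FORCE_8BYTE_ALIGNMENT is
 * not set, a 4-byte RB_ALIGNMENT and a 4-byte event header): a
 * request for 100 bytes of data fits the small encoding, so the
 * reserved length is ALIGN(100 + 4, 4) == 104 bytes. A request larger
 * than RB_MAX_SMALL_DATA also consumes array[0] to hold the length,
 * e.g. 200 bytes of data reserves ALIGN(200 + 4 + 4, 4) == 208 bytes.
 */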
2041
2042static inline void
2043rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2044              struct buffer_page *tail_page,
2045              unsigned long tail, unsigned long length)
2046{
2047        struct ring_buffer_event *event;
2048
2049        /*
2050         * Only the event that crossed the page boundary
2051         * must fill the old tail_page with padding.
2052         */
2053        if (tail >= BUF_PAGE_SIZE) {
2054                /*
2055                 * If the page was filled, then we still need
2056                 * to update the real_end. Reset it to zero
2057                 * and the reader will ignore it.
2058                 */
2059                if (tail == BUF_PAGE_SIZE)
2060                        tail_page->real_end = 0;
2061
2062                local_sub(length, &tail_page->write);
2063                return;
2064        }
2065
2066        event = __rb_page_index(tail_page, tail);
2067        kmemcheck_annotate_bitfield(event, bitfield);
2068
2069        /* account for padding bytes */
2070        local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2071
2072        /*
2073         * Save the original length to the meta data.
2074         * This will be used by the reader to update the lost
2075         * event counter.
2076         */
2077        tail_page->real_end = tail;
2078
2079        /*
2080         * If this event is bigger than the minimum size, then
2081         * we need to be careful that we don't subtract the
2082         * write counter enough to allow another writer to slip
2083         * in on this page.
2084         * We put in a discarded commit instead, to make sure
2085         * that this space is not used again.
2086         *
2087         * If we are less than the minimum size, we don't need to
2088         * worry about it.
2089         */
2090        if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2091                /* No room for any events */
2092
2093                /* Mark the rest of the page with padding */
2094                rb_event_set_padding(event);
2095
2096                /* Set the write back to the previous setting */
2097                local_sub(length, &tail_page->write);
2098                return;
2099        }
2100
2101        /* Put in a discarded event */
2102        event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2103        event->type_len = RINGBUF_TYPE_PADDING;
2104        /* time delta must be non zero */
2105        event->time_delta = 1;
2106
2107        /* Set write to end of buffer */
2108        length = (tail + length) - BUF_PAGE_SIZE;
2109        local_sub(length, &tail_page->write);
2110}
2111
2112/*
2113 * This is the slow path, force gcc not to inline it.
2114 */
2115static noinline struct ring_buffer_event *
2116rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2117             unsigned long length, unsigned long tail,
2118             struct buffer_page *tail_page, u64 ts)
2119{
2120        struct buffer_page *commit_page = cpu_buffer->commit_page;
2121        struct ring_buffer *buffer = cpu_buffer->buffer;
2122        struct buffer_page *next_page;
2123        int ret;
2124
2125        next_page = tail_page;
2126
2127        rb_inc_page(cpu_buffer, &next_page);
2128
2129        /*
2130         * If for some reason, we had an interrupt storm that made
2131         * it all the way around the buffer, bail, and warn
2132         * about it.
2133         */
2134        if (unlikely(next_page == commit_page)) {
2135                local_inc(&cpu_buffer->commit_overrun);
2136                goto out_reset;
2137        }
2138
2139        /*
2140         * This is where the fun begins!
2141         *
2142         * We are fighting against races between a reader that
2143         * could be on another CPU trying to swap its reader
2144         * page with the buffer head.
2145         *
2146         * We are also fighting against interrupts coming in and
2147         * moving the head or tail on us as well.
2148         *
2149         * If the next page is the head page then we have filled
2150         * the buffer, unless the commit page is still on the
2151         * reader page.
2152         */
2153        if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2154
2155                /*
2156                 * If the commit is not on the reader page, then
2157                 * move the head page.
2158                 */
2159                if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2160                        /*
2161                         * If we are not in overwrite mode,
2162                         * this is easy, just stop here.
2163                         */
2164                        if (!(buffer->flags & RB_FL_OVERWRITE)) {
2165                                local_inc(&cpu_buffer->dropped_events);
2166                                goto out_reset;
2167                        }
2168
2169                        ret = rb_handle_head_page(cpu_buffer,
2170                                                  tail_page,
2171                                                  next_page);
2172                        if (ret < 0)
2173                                goto out_reset;
2174                        if (ret)
2175                                goto out_again;
2176                } else {
2177                        /*
2178                         * We need to be careful here too. The
2179                         * commit page could still be on the reader
2180                         * page. We could have a small buffer, and
2181                         * have filled up the buffer with events
2182                         * from interrupts and such, and wrapped.
2183                         *
2184                         * Note, if the tail page is also on the
2185                         * reader_page, we let it move out.
2186                         */
2187                        if (unlikely((cpu_buffer->commit_page !=
2188                                      cpu_buffer->tail_page) &&
2189                                     (cpu_buffer->commit_page ==
2190                                      cpu_buffer->reader_page))) {
2191                                local_inc(&cpu_buffer->commit_overrun);
2192                                goto out_reset;
2193                        }
2194                }
2195        }
2196
2197        ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2198        if (ret) {
2199                /*
2200                 * Nested commits always have zero deltas, so
2201                 * just reread the time stamp
2202                 */
2203                ts = rb_time_stamp(buffer);
2204                next_page->page->time_stamp = ts;
2205        }
2206
2207 out_again:
2208
2209        rb_reset_tail(cpu_buffer, tail_page, tail, length);
2210
2211        /* fail and let the caller try again */
2212        return ERR_PTR(-EAGAIN);
2213
2214 out_reset:
2215        /* reset write */
2216        rb_reset_tail(cpu_buffer, tail_page, tail, length);
2217
2218        return NULL;
2219}
2220
2221static struct ring_buffer_event *
2222__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2223                  unsigned long length, u64 ts,
2224                  u64 delta, int add_timestamp)
2225{
2226        struct buffer_page *tail_page;
2227        struct ring_buffer_event *event;
2228        unsigned long tail, write;
2229
2230        /*
2231         * If the time delta since the last event is too big to
2232         * hold in the time field of the event, then we append a
2233         * TIME EXTEND event ahead of the data event.
2234         */
2235        if (unlikely(add_timestamp))
2236                length += RB_LEN_TIME_EXTEND;
2237
2238        tail_page = cpu_buffer->tail_page;
2239        write = local_add_return(length, &tail_page->write);
2240
2241        /* set write to only the index of the write */
2242        write &= RB_WRITE_MASK;
2243        tail = write - length;
2244
2245        /* See if we shot past the end of this buffer page */
2246        if (unlikely(write > BUF_PAGE_SIZE))
2247                return rb_move_tail(cpu_buffer, length, tail,
2248                                    tail_page, ts);
2249
2250        /* We reserved something on the buffer */
2251
2252        event = __rb_page_index(tail_page, tail);
2253        kmemcheck_annotate_bitfield(event, bitfield);
2254        rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2255
2256        local_inc(&tail_page->entries);
2257
2258        /*
2259         * If this is the first commit on the page, then update
2260         * its timestamp.
2261         */
2262        if (!tail)
2263                tail_page->page->time_stamp = ts;
2264
2265        /* account for these added bytes */
2266        local_add(length, &cpu_buffer->entries_bytes);
2267
2268        return event;
2269}
2270
2271static inline int
2272rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2273                  struct ring_buffer_event *event)
2274{
2275        unsigned long new_index, old_index;
2276        struct buffer_page *bpage;
2277        unsigned long index;
2278        unsigned long addr;
2279
2280        new_index = rb_event_index(event);
2281        old_index = new_index + rb_event_ts_length(event);
2282        addr = (unsigned long)event;
2283        addr &= PAGE_MASK;
2284
2285        bpage = cpu_buffer->tail_page;
2286
2287        if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2288                unsigned long write_mask =
2289                        local_read(&bpage->write) & ~RB_WRITE_MASK;
2290                unsigned long event_length = rb_event_length(event);
2291                /*
2292                 * This is on the tail page. It is possible that
2293                 * a write could come in and move the tail page
2294                 * and write to the next page. That is fine
2295                 * because we just shorten what is on this page.
2296                 */
2297                old_index += write_mask;
2298                new_index += write_mask;
2299                index = local_cmpxchg(&bpage->write, old_index, new_index);
2300                if (index == old_index) {
2301                        /* update counters */
2302                        local_sub(event_length, &cpu_buffer->entries_bytes);
2303                        return 1;
2304                }
2305        }
2306
2307        /* could not discard */
2308        return 0;
2309}
2310
2311static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2312{
2313        local_inc(&cpu_buffer->committing);
2314        local_inc(&cpu_buffer->commits);
2315}
2316
2317static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2318{
2319        unsigned long commits;
2320
2321        if (RB_WARN_ON(cpu_buffer,
2322                       !local_read(&cpu_buffer->committing)))
2323                return;
2324
2325 again:
2326        commits = local_read(&cpu_buffer->commits);
2327        /* synchronize with interrupts */
2328        barrier();
2329        if (local_read(&cpu_buffer->committing) == 1)
2330                rb_set_commit_to_write(cpu_buffer);
2331
2332        local_dec(&cpu_buffer->committing);
2333
2334        /* synchronize with interrupts */
2335        barrier();
2336
2337        /*
2338         * Need to account for interrupts coming in between the
2339         * updating of the commit page and the clearing of the
2340         * committing counter.
2341         */
2342        if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2343            !local_read(&cpu_buffer->committing)) {
2344                local_inc(&cpu_buffer->committing);
2345                goto again;
2346        }
2347}
2348
2349static struct ring_buffer_event *
2350rb_reserve_next_event(struct ring_buffer *buffer,
2351                      struct ring_buffer_per_cpu *cpu_buffer,
2352                      unsigned long length)
2353{
2354        struct ring_buffer_event *event;
2355        u64 ts, delta;
2356        int nr_loops = 0;
2357        int add_timestamp;
2358        u64 diff;
2359
2360        rb_start_commit(cpu_buffer);
2361
2362#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2363        /*
2364         * Due to the ability to swap a cpu buffer out of a ring buffer,
2365         * it is possible it was swapped before we committed.
2366         * (committing stops a swap). We check for it here and
2367         * if it happened, we have to fail the write.
2368         */
2369        barrier();
2370        if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2371                local_dec(&cpu_buffer->committing);
2372                local_dec(&cpu_buffer->commits);
2373                return NULL;
2374        }
2375#endif
2376
2377        length = rb_calculate_event_length(length);
2378 again:
2379        add_timestamp = 0;
2380        delta = 0;
2381
2382        /*
2383         * We allow for interrupts to reenter here and do a trace.
2384         * If one does, it will cause this original code to loop
2385         * back here. Even with heavy interrupts happening, this
2386         * should only happen a few times in a row. If this happens
2387         * 1000 times in a row, there must be either an interrupt
2388         * storm or we have something buggy.
2389         * Bail!
2390         */
2391        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2392                goto out_fail;
2393
2394        ts = rb_time_stamp(cpu_buffer->buffer);
2395        diff = ts - cpu_buffer->write_stamp;
2396
2397        /* make sure this diff is calculated here */
2398        barrier();
2399
2400        /* Did the write stamp get updated already? */
2401        if (likely(ts >= cpu_buffer->write_stamp)) {
2402                delta = diff;
2403                if (unlikely(test_time_stamp(delta))) {
2404                        int local_clock_stable = 1;
2405#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2406                        local_clock_stable = sched_clock_stable;
2407#endif
2408                        WARN_ONCE(delta > (1ULL << 59),
2409                                  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2410                                  (unsigned long long)delta,
2411                                  (unsigned long long)ts,
2412                                  (unsigned long long)cpu_buffer->write_stamp,
2413                                  local_clock_stable ? "" :
2414                                  "If you just came from a suspend/resume,\n"
2415                                  "please switch to the trace global clock:\n"
2416                                  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2417                        add_timestamp = 1;
2418                }
2419        }
2420
2421        event = __rb_reserve_next(cpu_buffer, length, ts,
2422                                  delta, add_timestamp);
2423        if (unlikely(PTR_ERR(event) == -EAGAIN))
2424                goto again;
2425
2426        if (!event)
2427                goto out_fail;
2428
2429        return event;
2430
2431 out_fail:
2432        rb_end_commit(cpu_buffer);
2433        return NULL;
2434}
2435
2436#ifdef CONFIG_TRACING
2437
2438/*
2439 * The lock and unlock are done within a preempt disable section.
2440 * The current_context per_cpu variable can only be modified
2441 * by the current task between lock and unlock. But it can
2442 * be modified more than once via an interrupt. To pass this
2443 * information from the lock to the unlock without having to
2444 * access the 'in_interrupt()' functions again (which do show
2445 * a bit of overhead in something as critical as function tracing),
2446 * we use a bitmask trick.
2447 *
2448 *  bit 0 =  NMI context
2449 *  bit 1 =  IRQ context
2450 *  bit 2 =  SoftIRQ context
2451 *  bit 3 =  normal context.
2452 *
2453 * This works because this is the order of contexts that can
2454 * preempt other contexts. A SoftIRQ never preempts an IRQ
2455 * context.
2456 *
2457 * When the context is determined, the corresponding bit is
2458 * checked and set (if it was set, then a recursion of that context
2459 * happened).
2460 *
2461 * On unlock, we need to clear this bit. To do so, just subtract
2462 * 1 from the current_context and AND it to itself.
2463 *
2464 * (binary)
2465 *  101 - 1 = 100
2466 *  101 & 100 = 100 (clearing bit zero)
2467 *
2468 *  1010 - 1 = 1001
2469 *  1010 & 1001 = 1000 (clearing bit 1)
2470 *
2471 * The least significant bit can be cleared this way, and it
2472 * just so happens that it is the same bit corresponding to
2473 * the current context.
2474 */
2475static DEFINE_PER_CPU(unsigned int, current_context);
2476
2477static __always_inline int trace_recursive_lock(void)
2478{
2479        unsigned int val = this_cpu_read(current_context);
2480        int bit;
2481
2482        if (in_interrupt()) {
2483                if (in_nmi())
2484                        bit = 0;
2485                else if (in_irq())
2486                        bit = 1;
2487                else
2488                        bit = 2;
2489        } else
2490                bit = 3;
2491
2492        if (unlikely(val & (1 << bit)))
2493                return 1;
2494
2495        val |= (1 << bit);
2496        this_cpu_write(current_context, val);
2497
2498        return 0;
2499}
2500
2501static __always_inline void trace_recursive_unlock(void)
2502{
2503        unsigned int val = this_cpu_read(current_context);
2504
2505        val--;
2506        val &= this_cpu_read(current_context);
2507        this_cpu_write(current_context, val);
2508}
2509
2510#else
2511
2512#define trace_recursive_lock()          (0)
2513#define trace_recursive_unlock()        do { } while (0)
2514
2515#endif
2516
2517/**
2518 * ring_buffer_lock_reserve - reserve a part of the buffer
2519 * @buffer: the ring buffer to reserve from
2520 * @length: the length of the data to reserve (excluding event header)
2521 *
2522 * Returns a reserved event on the ring buffer to copy directly to.
2523 * The user of this interface will need to get the body to write into
2524 * and can use the ring_buffer_event_data() interface.
2525 *
2526 * The length is the length of the data needed, not the event length
2527 * which also includes the event header.
2528 *
2529 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2530 * If NULL is returned, then nothing has been allocated or locked.
2531 */
2532struct ring_buffer_event *
2533ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2534{
2535        struct ring_buffer_per_cpu *cpu_buffer;
2536        struct ring_buffer_event *event;
2537        int cpu;
2538
2539        if (ring_buffer_flags != RB_BUFFERS_ON)
2540                return NULL;
2541
2542        /* If we are tracing schedule, we don't want to recurse */
2543        preempt_disable_notrace();
2544
2545        if (atomic_read(&buffer->record_disabled))
2546                goto out_nocheck;
2547
2548        if (trace_recursive_lock())
2549                goto out_nocheck;
2550
2551        cpu = raw_smp_processor_id();
2552
2553        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2554                goto out;
2555
2556        cpu_buffer = buffer->buffers[cpu];
2557
2558        if (atomic_read(&cpu_buffer->record_disabled))
2559                goto out;
2560
2561        if (length > BUF_MAX_DATA_SIZE)
2562                goto out;
2563
2564        event = rb_reserve_next_event(buffer, cpu_buffer, length);
2565        if (!event)
2566                goto out;
2567
2568        return event;
2569
2570 out:
2571        trace_recursive_unlock();
2572
2573 out_nocheck:
2574        preempt_enable_notrace();
2575        return NULL;
2576}
2577EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2578
2579static void
2580rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2581                      struct ring_buffer_event *event)
2582{
2583        u64 delta;
2584
2585        /*
2586         * The event first in the commit queue updates the
2587         * time stamp.
2588         */
2589        if (rb_event_is_commit(cpu_buffer, event)) {
2590                /*
2591                 * A commit event that is first on a page
2592                 * updates the write timestamp with the page stamp
2593                 */
2594                if (!rb_event_index(event))
2595                        cpu_buffer->write_stamp =
2596                                cpu_buffer->commit_page->page->time_stamp;
2597                else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2598                        delta = event->array[0];
2599                        delta <<= TS_SHIFT;
2600                        delta += event->time_delta;
2601                        cpu_buffer->write_stamp += delta;
2602                } else
2603                        cpu_buffer->write_stamp += event->time_delta;
2604        }
2605}
2606
2607static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2608                      struct ring_buffer_event *event)
2609{
2610        local_inc(&cpu_buffer->entries);
2611        rb_update_write_stamp(cpu_buffer, event);
2612        rb_end_commit(cpu_buffer);
2613}
2614
2615/**
2616 * ring_buffer_unlock_commit - commit a reserved event
2617 * @buffer: The buffer to commit to
2618 * @event: The event pointer to commit.
2619 *
2620 * This commits the data to the ring buffer, and releases any locks held.
2621 *
2622 * Must be paired with ring_buffer_lock_reserve.
2623 */
2624int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2625                              struct ring_buffer_event *event)
2626{
2627        struct ring_buffer_per_cpu *cpu_buffer;
2628        int cpu = raw_smp_processor_id();
2629
2630        cpu_buffer = buffer->buffers[cpu];
2631
2632        rb_commit(cpu_buffer, event);
2633
2634        trace_recursive_unlock();
2635
2636        preempt_enable_notrace();
2637
2638        return 0;
2639}
2640EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
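
/*
 * Example (illustrative sketch) of the reserve/commit pairing. The
 * payload type and its field are made up for the example; any layout
 * the caller writes into the reserved space works the same way.
 *
 *        struct my_entry { unsigned long val; };  (hypothetical payload)
 *        struct ring_buffer_event *event;
 *        struct my_entry *entry;
 *
 *        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *        entry->val = 42;
 *        ring_buffer_unlock_commit(buffer, event);
 */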
2641
2642static inline void rb_event_discard(struct ring_buffer_event *event)
2643{
2644        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2645                event = skip_time_extend(event);
2646
2647        /* array[0] holds the actual length for the discarded event */
2648        event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2649        event->type_len = RINGBUF_TYPE_PADDING;
2650        /* time delta must be non zero */
2651        if (!event->time_delta)
2652                event->time_delta = 1;
2653}
2654
2655/*
2656 * Decrement the entry count of the page that an event is on.
2657 * The event does not even need to exist, only the pointer
2658 * to the page it is on. This may only be called before the commit
2659 * takes place.
2660 */
2661static inline void
2662rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2663                   struct ring_buffer_event *event)
2664{
2665        unsigned long addr = (unsigned long)event;
2666        struct buffer_page *bpage = cpu_buffer->commit_page;
2667        struct buffer_page *start;
2668
2669        addr &= PAGE_MASK;
2670
2671        /* Do the likely case first */
2672        if (likely(bpage->page == (void *)addr)) {
2673                local_dec(&bpage->entries);
2674                return;
2675        }
2676
2677        /*
2678         * Because the commit page may be on the reader page we
2679         * start with the next page and stop the loop when we get back to it.
2680         */
2681        rb_inc_page(cpu_buffer, &bpage);
2682        start = bpage;
2683        do {
2684                if (bpage->page == (void *)addr) {
2685                        local_dec(&bpage->entries);
2686                        return;
2687                }
2688                rb_inc_page(cpu_buffer, &bpage);
2689        } while (bpage != start);
2690
2691        /* commit not part of this buffer?? */
2692        RB_WARN_ON(cpu_buffer, 1);
2693}
2694
2695/**
2696 * ring_buffer_commit_discard - discard an event that has not been committed
2697 * @buffer: the ring buffer
2698 * @event: non committed event to discard
2699 *
2700 * Sometimes an event that is in the ring buffer needs to be ignored.
2701 * This function lets the user discard an event in the ring buffer
2702 * and then that event will not be read later.
2703 *
2704 * This function only works if it is called before the item has been
2705 * committed. It will try to free the event from the ring buffer
2706 * if another event has not been added behind it.
2707 *
2708 * If another event has been added behind it, it will set the event
2709 * up as discarded, and perform the commit.
2710 *
2711 * If this function is called, do not call ring_buffer_unlock_commit on
2712 * the event.
2713 */
2714void ring_buffer_discard_commit(struct ring_buffer *buffer,
2715                                struct ring_buffer_event *event)
2716{
2717        struct ring_buffer_per_cpu *cpu_buffer;
2718        int cpu;
2719
2720        /* The event is discarded regardless */
2721        rb_event_discard(event);
2722
2723        cpu = smp_processor_id();
2724        cpu_buffer = buffer->buffers[cpu];
2725
2726        /*
2727         * This must only be called if the event has not been
2728         * committed yet. Thus we can assume that preemption
2729         * is still disabled.
2730         */
2731        RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2732
2733        rb_decrement_entry(cpu_buffer, event);
2734        if (rb_try_to_discard(cpu_buffer, event))
2735                goto out;
2736
2737        /*
2738         * The commit is still visible by the reader, so we
2739         * must still update the timestamp.
2740         */
2741        rb_update_write_stamp(cpu_buffer, event);
2742 out:
2743        rb_end_commit(cpu_buffer);
2744
2745        trace_recursive_unlock();
2746
2747        preempt_enable_notrace();
2748
2749}
2750EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
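
/*
 * Example (illustrative sketch): reserving an event and then deciding
 * not to keep it, for instance after a filter check. The helpers are
 * hypothetical; the point is that exactly one of
 * ring_buffer_discard_commit() or ring_buffer_unlock_commit() must
 * end the reservation.
 *
 *        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *        fill_entry(entry);                      (hypothetical)
 *        if (entry_filtered_out(entry))          (hypothetical)
 *                ring_buffer_discard_commit(buffer, event);
 *        else
 *                ring_buffer_unlock_commit(buffer, event);
 */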
2751
2752/**
2753 * ring_buffer_write - write data to the buffer without reserving
2754 * @buffer: The ring buffer to write to.
2755 * @length: The length of the data being written (excluding the event header)
2756 * @data: The data to write to the buffer.
2757 *
2758 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2759 * one function. If you already have the data to write to the buffer, it
2760 * may be easier to simply call this function.
2761 *
2762 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2763 * and not the length of the event which would hold the header.
2764 */
2765int ring_buffer_write(struct ring_buffer *buffer,
2766                      unsigned long length,
2767                      void *data)
2768{
2769        struct ring_buffer_per_cpu *cpu_buffer;
2770        struct ring_buffer_event *event;
2771        void *body;
2772        int ret = -EBUSY;
2773        int cpu;
2774
2775        if (ring_buffer_flags != RB_BUFFERS_ON)
2776                return -EBUSY;
2777
2778        preempt_disable_notrace();
2779
2780        if (atomic_read(&buffer->record_disabled))
2781                goto out;
2782
2783        cpu = raw_smp_processor_id();
2784
2785        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2786                goto out;
2787
2788        cpu_buffer = buffer->buffers[cpu];
2789
2790        if (atomic_read(&cpu_buffer->record_disabled))
2791                goto out;
2792
2793        if (length > BUF_MAX_DATA_SIZE)
2794                goto out;
2795
2796        event = rb_reserve_next_event(buffer, cpu_buffer, length);
2797        if (!event)
2798                goto out;
2799
2800        body = rb_event_data(event);
2801
2802        memcpy(body, data, length);
2803
2804        rb_commit(cpu_buffer, event);
2805
2806        ret = 0;
2807 out:
2808        preempt_enable_notrace();
2809
2810        return ret;
2811}
2812EXPORT_SYMBOL_GPL(ring_buffer_write);
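
/*
 * Example (illustrative sketch): writing data that is already laid
 * out in memory, without a separate reserve/commit step.
 *
 *        char msg[] = "buffer event";
 *
 *        if (ring_buffer_write(buffer, sizeof(msg), msg))
 *                pr_debug("ring buffer write failed\n");
 */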
2813
2814static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2815{
2816        struct buffer_page *reader = cpu_buffer->reader_page;
2817        struct buffer_page *head = rb_set_head_page(cpu_buffer);
2818        struct buffer_page *commit = cpu_buffer->commit_page;
2819
2820        /* In case of error, head will be NULL */
2821        if (unlikely(!head))
2822                return 1;
2823
2824        return reader->read == rb_page_commit(reader) &&
2825                (commit == reader ||
2826                 (commit == head &&
2827                  head->read == rb_page_commit(commit)));
2828}
2829
2830/**
2831 * ring_buffer_record_disable - stop all writes into the buffer
2832 * @buffer: The ring buffer to stop writes to.
2833 *
2834 * This prevents all writes to the buffer. Any attempt to write
2835 * to the buffer after this will fail and return NULL.
2836 *
2837 * The caller should call synchronize_sched() after this.
2838 */
2839void ring_buffer_record_disable(struct ring_buffer *buffer)
2840{
2841        atomic_inc(&buffer->record_disabled);
2842}
2843EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2844
2845/**
2846 * ring_buffer_record_enable - enable writes to the buffer
2847 * @buffer: The ring buffer to enable writes
2848 *
2849 * Note, multiple disables will need the same number of enables
2850 * to truly enable the writing (much like preempt_disable).
2851 */
2852void ring_buffer_record_enable(struct ring_buffer *buffer)
2853{
2854        atomic_dec(&buffer->record_disabled);
2855}
2856EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
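
/*
 * Example (illustrative sketch): pausing writers around a section
 * that needs a quiescent buffer, following the synchronize_sched()
 * advice above.
 *
 *        ring_buffer_record_disable(buffer);
 *        synchronize_sched();    (wait for writers already in flight)
 *        ... inspect or copy out the per-CPU buffers ...
 *        ring_buffer_record_enable(buffer);
 */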
2857
2858/**
2859 * ring_buffer_record_off - stop all writes into the buffer
2860 * @buffer: The ring buffer to stop writes to.
2861 *
2862 * This prevents all writes to the buffer. Any attempt to write
2863 * to the buffer after this will fail and return NULL.
2864 *
2865 * This is different than ring_buffer_record_disable() as
2866 * it works like an on/off switch, whereas the disable() version
2867 * must be paired with an enable().
2868 */
2869void ring_buffer_record_off(struct ring_buffer *buffer)
2870{
2871        unsigned int rd;
2872        unsigned int new_rd;
2873
2874        do {
2875                rd = atomic_read(&buffer->record_disabled);
2876                new_rd = rd | RB_BUFFER_OFF;
2877        } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2878}
2879EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2880
2881/**
2882 * ring_buffer_record_on - restart writes into the buffer
2883 * @buffer: The ring buffer to start writes to.
2884 *
2885 * This enables all writes to the buffer that was disabled by
2886 * ring_buffer_record_off().
2887 *
2888 * This is different than ring_buffer_record_enable() as
2889 * it works like an on/off switch, whereas the enable() version
2890 * must be paired with a disable().
2891 */
2892void ring_buffer_record_on(struct ring_buffer *buffer)
2893{
2894        unsigned int rd;
2895        unsigned int new_rd;
2896
2897        do {
2898                rd = atomic_read(&buffer->record_disabled);
2899                new_rd = rd & ~RB_BUFFER_OFF;
2900        } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2901}
2902EXPORT_SYMBOL_GPL(ring_buffer_record_on);
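
/*
 * Example (illustrative sketch): the on/off switch does not nest, so
 * a single off/on pair is enough no matter how many disable/enable
 * pairs other code currently holds.
 *
 *        ring_buffer_record_off(buffer);
 *        ... writes now fail regardless of the disable counter ...
 *        ring_buffer_record_on(buffer);
 */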
2903
2904/**
2905 * ring_buffer_record_is_on - return true if the ring buffer can write
2906 * @buffer: The ring buffer to see if write is enabled
2907 *
2908 * Returns true if the ring buffer is in a state that it accepts writes.
2909 */
2910int ring_buffer_record_is_on(struct ring_buffer *buffer)
2911{
2912        return !atomic_read(&buffer->record_disabled);
2913}
2914
2915/**
2916 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2917 * @buffer: The ring buffer to stop writes to.
2918 * @cpu: The CPU buffer to stop
2919 *
2920 * This prevents all writes to the buffer. Any attempt to write
2921 * to the buffer after this will fail and return NULL.
2922 *
2923 * The caller should call synchronize_sched() after this.
2924 */
2925void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2926{
2927        struct ring_buffer_per_cpu *cpu_buffer;
2928
2929        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2930                return;
2931
2932        cpu_buffer = buffer->buffers[cpu];
2933        atomic_inc(&cpu_buffer->record_disabled);
2934}
2935EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2936
2937/**
2938 * ring_buffer_record_enable_cpu - enable writes to the buffer
2939 * @buffer: The ring buffer to enable writes
2940 * @cpu: The CPU to enable.
2941 *
2942 * Note, multiple disables will need the same number of enables
2943 * to truly enable the writing (much like preempt_disable).
2944 */
2945void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2946{
2947        struct ring_buffer_per_cpu *cpu_buffer;
2948
2949        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2950                return;
2951
2952        cpu_buffer = buffer->buffers[cpu];
2953        atomic_dec(&cpu_buffer->record_disabled);
2954}
2955EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2956
2957/*
2958 * The total number of entries in the ring buffer is the running counter
2959 * of entries entered into the ring buffer, minus the sum of
2960 * the entries read from the ring buffer and the number of
2961 * entries that were overwritten.
2962 */
2963static inline unsigned long
2964rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2965{
2966        return local_read(&cpu_buffer->entries) -
2967                (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2968}
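
/*
 * Illustrative arithmetic for the helper above: if 1000 events have
 * been written, 100 were overwritten by the writer wrapping, and 250
 * have been read, then 1000 - (100 + 250) == 650 entries remain.
 */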
2969
2970/**
2971 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2972 * @buffer: The ring buffer
2973 * @cpu: The per CPU buffer to read from.
2974 */
2975u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2976{
2977        unsigned long flags;
2978        struct ring_buffer_per_cpu *cpu_buffer;
2979        struct buffer_page *bpage;
2980        u64 ret = 0;
2981
2982        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2983                return 0;
2984
2985        cpu_buffer = buffer->buffers[cpu];
2986        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2987        /*
2988         * if the tail is on reader_page, oldest time stamp is on the reader
2989         * page
2990         */
2991        if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2992                bpage = cpu_buffer->reader_page;
2993        else
2994                bpage = rb_set_head_page(cpu_buffer);
2995        if (bpage)
2996                ret = bpage->page->time_stamp;
2997        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2998
2999        return ret;
3000}
3001EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3002
3003/**
3004 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3005 * @buffer: The ring buffer
3006 * @cpu: The per CPU buffer to read from.
3007 */
3008unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3009{
3010        struct ring_buffer_per_cpu *cpu_buffer;
3011        unsigned long ret;
3012
3013        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3014                return 0;
3015
3016        cpu_buffer = buffer->buffers[cpu];
3017        ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3018
3019        return ret;
3020}
3021EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3022
3023/**
3024 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3025 * @buffer: The ring buffer
3026 * @cpu: The per CPU buffer to get the entries from.
3027 */
3028unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3029{
3030        struct ring_buffer_per_cpu *cpu_buffer;
3031
3032        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3033                return 0;
3034
3035        cpu_buffer = buffer->buffers[cpu];
3036
3037        return rb_num_of_entries(cpu_buffer);
3038}
3039EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3040
3041/**
3042 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3043 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3044 * @buffer: The ring buffer
3045 * @cpu: The per CPU buffer to get the number of overruns from
3046 */
3047unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3048{
3049        struct ring_buffer_per_cpu *cpu_buffer;
3050        unsigned long ret;
3051
3052        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3053                return 0;
3054
3055        cpu_buffer = buffer->buffers[cpu];
3056        ret = local_read(&cpu_buffer->overrun);
3057
3058        return ret;
3059}
3060EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3061
3062/**
3063 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3064 * commits failing due to the buffer wrapping around while there are uncommitted
3065 * events, such as during an interrupt storm.
3066 * @buffer: The ring buffer
3067 * @cpu: The per CPU buffer to get the number of overruns from
3068 */
3069unsigned long
3070ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3071{
3072        struct ring_buffer_per_cpu *cpu_buffer;
3073        unsigned long ret;
3074
3075        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3076                return 0;
3077
3078        cpu_buffer = buffer->buffers[cpu];
3079        ret = local_read(&cpu_buffer->commit_overrun);
3080
3081        return ret;
3082}
3083EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3084
3085/**
3086 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3087 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3088 * @buffer: The ring buffer
3089 * @cpu: The per CPU buffer to get the number of dropped events from
3090 */
3091unsigned long
3092ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3093{
3094        struct ring_buffer_per_cpu *cpu_buffer;
3095        unsigned long ret;
3096
3097        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3098                return 0;
3099
3100        cpu_buffer = buffer->buffers[cpu];
3101        ret = local_read(&cpu_buffer->dropped_events);
3102
3103        return ret;
3104}
3105EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3106
3107/**
3108 * ring_buffer_read_events_cpu - get the number of events successfully read
3109 * @buffer: The ring buffer
3110 * @cpu: The per CPU buffer to get the number of events read
3111 */
3112unsigned long
3113ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3114{
3115        struct ring_buffer_per_cpu *cpu_buffer;
3116
3117        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3118                return 0;
3119
3120        cpu_buffer = buffer->buffers[cpu];
3121        return cpu_buffer->read;
3122}
3123EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3124
3125/**
3126 * ring_buffer_entries - get the number of entries in a buffer
3127 * @buffer: The ring buffer
3128 *
3129 * Returns the total number of entries in the ring buffer
3130 * (all CPU entries)
3131 */
3132unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3133{
3134        struct ring_buffer_per_cpu *cpu_buffer;
3135        unsigned long entries = 0;
3136        int cpu;
3137
3138        /* if you care about this being correct, lock the buffer */
3139        for_each_buffer_cpu(buffer, cpu) {
3140                cpu_buffer = buffer->buffers[cpu];
3141                entries += rb_num_of_entries(cpu_buffer);
3142        }
3143
3144        return entries;
3145}
3146EXPORT_SYMBOL_GPL(ring_buffer_entries);
3147
3148/**
3149 * ring_buffer_overruns - get the number of overruns in buffer
3150 * @buffer: The ring buffer
3151 *
3152 * Returns the total number of overruns in the ring buffer
3153 * (all CPU entries)
3154 */
3155unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3156{
3157        struct ring_buffer_per_cpu *cpu_buffer;
3158        unsigned long overruns = 0;
3159        int cpu;
3160
3161        /* if you care about this being correct, lock the buffer */
3162        for_each_buffer_cpu(buffer, cpu) {
3163                cpu_buffer = buffer->buffers[cpu];
3164                overruns += local_read(&cpu_buffer->overrun);
3165        }
3166
3167        return overruns;
3168}
3169EXPORT_SYMBOL_GPL(ring_buffer_overruns);
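
/*
 * Example (a sketch, not part of this file): the statistics helpers
 * above can be combined into a simple per CPU dump. The function name
 * and message format below are illustrative only; any code that owns
 * a struct ring_buffer could do the same.
 *
 *	static void example_dump_stats(struct ring_buffer *buffer)
 *	{
 *		int cpu;
 *
 *		for_each_online_cpu(cpu) {
 *			pr_info("cpu%d: entries=%lu read=%lu overrun=%lu commit_overrun=%lu dropped=%lu bytes=%lu oldest_ts=%llu\n",
 *				cpu,
 *				ring_buffer_entries_cpu(buffer, cpu),
 *				ring_buffer_read_events_cpu(buffer, cpu),
 *				ring_buffer_overrun_cpu(buffer, cpu),
 *				ring_buffer_commit_overrun_cpu(buffer, cpu),
 *				ring_buffer_dropped_events_cpu(buffer, cpu),
 *				ring_buffer_bytes_cpu(buffer, cpu),
 *				(unsigned long long)ring_buffer_oldest_event_ts(buffer, cpu));
 *		}
 *		pr_info("total: entries=%lu overruns=%lu\n",
 *			ring_buffer_entries(buffer),
 *			ring_buffer_overruns(buffer));
 *	}
 */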
3170
3171static void rb_iter_reset(struct ring_buffer_iter *iter)
3172{
3173        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3174
3175        /* Iterator usage is expected to have record disabled */
3176        if (list_empty(&cpu_buffer->reader_page->list)) {
3177                iter->head_page = rb_set_head_page(cpu_buffer);
3178                if (unlikely(!iter->head_page))
3179                        return;
3180                iter->head = iter->head_page->read;
3181        } else {
3182                iter->head_page = cpu_buffer->reader_page;
3183                iter->head = cpu_buffer->reader_page->read;
3184        }
3185        if (iter->head)
3186                iter->read_stamp = cpu_buffer->read_stamp;
3187        else
3188                iter->read_stamp = iter->head_page->page->time_stamp;
3189        iter->cache_reader_page = cpu_buffer->reader_page;
3190        iter->cache_read = cpu_buffer->read;
3191}
3192
3193/**
3194 * ring_buffer_iter_reset - reset an iterator
3195 * @iter: The iterator to reset
3196 *
3197 * Resets the iterator, so that it will start from the beginning
3198 * again.
3199 */
3200void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3201{
3202        struct ring_buffer_per_cpu *cpu_buffer;
3203        unsigned long flags;
3204
3205        if (!iter)
3206                return;
3207
3208        cpu_buffer = iter->cpu_buffer;
3209
3210        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3211        rb_iter_reset(iter);
3212        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3213}
3214EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3215
3216/**
3217 * ring_buffer_iter_empty - check if an iterator has no more to read
3218 * @iter: The iterator to check
3219 */
3220int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3221{
3222        struct ring_buffer_per_cpu *cpu_buffer;
3223
3224        cpu_buffer = iter->cpu_buffer;
3225
3226        return iter->head_page == cpu_buffer->commit_page &&
3227                iter->head == rb_commit_index(cpu_buffer);
3228}
3229EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3230
3231static void
3232rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3233                     struct ring_buffer_event *event)
3234{
3235        u64 delta;
3236
3237        switch (event->type_len) {
3238        case RINGBUF_TYPE_PADDING:
3239                return;
3240
3241        case RINGBUF_TYPE_TIME_EXTEND:
3242                delta = event->array[0];
3243                delta <<= TS_SHIFT;
3244                delta += event->time_delta;
3245                cpu_buffer->read_stamp += delta;
3246                return;
3247
3248        case RINGBUF_TYPE_TIME_STAMP:
3249                /* FIXME: not implemented */
3250                return;
3251
3252        case RINGBUF_TYPE_DATA:
3253                cpu_buffer->read_stamp += event->time_delta;
3254                return;
3255
3256        default:
3257                BUG();
3258        }
3259        return;
3260}
3261
3262static void
3263rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3264                          struct ring_buffer_event *event)
3265{
3266        u64 delta;
3267
3268        switch (event->type_len) {
3269        case RINGBUF_TYPE_PADDING:
3270                return;
3271
3272        case RINGBUF_TYPE_TIME_EXTEND:
3273                delta = event->array[0];
3274                delta <<= TS_SHIFT;
3275                delta += event->time_delta;
3276                iter->read_stamp += delta;
3277                return;
3278
3279        case RINGBUF_TYPE_TIME_STAMP:
3280                /* FIXME: not implemented */
3281                return;
3282
3283        case RINGBUF_TYPE_DATA:
3284                iter->read_stamp += event->time_delta;
3285                return;
3286
3287        default:
3288                BUG();
3289        }
3290        return;
3291}
3292
3293static struct buffer_page *
3294rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3295{
3296        struct buffer_page *reader = NULL;
3297        unsigned long overwrite;
3298        unsigned long flags;
3299        int nr_loops = 0;
3300        int ret;
3301
3302        local_irq_save(flags);
3303        arch_spin_lock(&cpu_buffer->lock);
3304
3305 again:
3306        /*
3307         * This should normally only loop twice. But because the
3308         * start of the reader inserts an empty page, it causes
3309         * a case where we will loop three times. There should be no
3310         * reason to loop four times (that I know of).
3311         */
3312        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3313                reader = NULL;
3314                goto out;
3315        }
3316
3317        reader = cpu_buffer->reader_page;
3318
3319        /* If there's more to read, return this page */
3320        if (cpu_buffer->reader_page->read < rb_page_size(reader))
3321                goto out;
3322
3323        /* Never should we have an index greater than the size */
3324        if (RB_WARN_ON(cpu_buffer,
3325                       cpu_buffer->reader_page->read > rb_page_size(reader)))
3326                goto out;
3327
3328        /* check if we caught up to the tail */
3329        reader = NULL;
3330        if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3331                goto out;
3332
3333        /* Don't bother swapping if the ring buffer is empty */
3334        if (rb_num_of_entries(cpu_buffer) == 0)
3335                goto out;
3336
3337        /*
3338         * Reset the reader page to size zero.
3339         */
3340        local_set(&cpu_buffer->reader_page->write, 0);
3341        local_set(&cpu_buffer->reader_page->entries, 0);
3342        local_set(&cpu_buffer->reader_page->page->commit, 0);
3343        cpu_buffer->reader_page->real_end = 0;
3344
3345 spin:
3346        /*
3347         * Splice the empty reader page into the list around the head.
3348         */
3349        reader = rb_set_head_page(cpu_buffer);
3350        if (!reader)
3351                goto out;
3352        cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3353        cpu_buffer->reader_page->list.prev = reader->list.prev;
3354
3355        /*
3356         * cpu_buffer->pages just needs to point to the buffer, it
3357         *  has no specific buffer page to point to. Let's move it out
3358         *  of our way so we don't accidentally swap it.
3359         */
3360        cpu_buffer->pages = reader->list.prev;
3361
3362        /* The reader page will be pointing to the new head */
3363        rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3364
3365        /*
3366         * We want to make sure we read the overruns after we set up our
3367         * pointers to the next object. The writer side does a
3368         * cmpxchg to cross pages which acts as the mb on the writer
3369         * side. Note, the reader will constantly fail the swap
3370         * while the writer is updating the pointers, so this
3371         * guarantees that the overwrite recorded here is the one we
3372         * want to compare with the last_overrun.
3373         */
3374        smp_mb();
3375        overwrite = local_read(&(cpu_buffer->overrun));
3376
3377        /*
3378         * Here's the tricky part.
3379         *
3380         * We need to move the pointer past the header page.
3381         * But we can only do that if a writer is not currently
3382         * moving it. The page before the header page has the
3383         * flag bit '1' set if it is pointing to the page we want.
3384         * But if the writer is in the process of moving it
3385         * then it will be '2' or already moved '0'.
3386         */
3387
3388        ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3389
3390        /*
3391         * If we did not convert it, then we must try again.
3392         */
3393        if (!ret)
3394                goto spin;
3395
3396        /*
3397         * Yeah! We succeeded in replacing the page.
3398         *
3399         * Now make the new head point back to the reader page.
3400         */
3401        rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3402        rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3403
3404        /* Finally update the reader page to the new head */
3405        cpu_buffer->reader_page = reader;
3406        rb_reset_reader_page(cpu_buffer);
3407
3408        if (overwrite != cpu_buffer->last_overrun) {
3409                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3410                cpu_buffer->last_overrun = overwrite;
3411        }
3412
3413        goto again;
3414
3415 out:
3416        arch_spin_unlock(&cpu_buffer->lock);
3417        local_irq_restore(flags);
3418
3419        return reader;
3420}
3421
3422static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3423{
3424        struct ring_buffer_event *event;
3425        struct buffer_page *reader;
3426        unsigned length;
3427
3428        reader = rb_get_reader_page(cpu_buffer);
3429
3430        /* This function should not be called when buffer is empty */
3431        if (RB_WARN_ON(cpu_buffer, !reader))
3432                return;
3433
3434        event = rb_reader_event(cpu_buffer);
3435
3436        if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3437                cpu_buffer->read++;
3438
3439        rb_update_read_stamp(cpu_buffer, event);
3440
3441        length = rb_event_length(event);
3442        cpu_buffer->reader_page->read += length;
3443}
3444
3445static void rb_advance_iter(struct ring_buffer_iter *iter)
3446{
3447        struct ring_buffer_per_cpu *cpu_buffer;
3448        struct ring_buffer_event *event;
3449        unsigned length;
3450
3451        cpu_buffer = iter->cpu_buffer;
3452
3453        /*
3454         * Check if we are at the end of the buffer.
3455         */
3456        if (iter->head >= rb_page_size(iter->head_page)) {
3457                /* discarded commits can make the page empty */
3458                if (iter->head_page == cpu_buffer->commit_page)
3459                        return;
3460                rb_inc_iter(iter);
3461                return;
3462        }
3463
3464        event = rb_iter_head_event(iter);
3465
3466        length = rb_event_length(event);
3467
3468        /*
3469         * This should not be called to advance the header if we are
3470         * at the tail of the buffer.
3471         */
3472        if (RB_WARN_ON(cpu_buffer,
3473                       (iter->head_page == cpu_buffer->commit_page) &&
3474                       (iter->head + length > rb_commit_index(cpu_buffer))))
3475                return;
3476
3477        rb_update_iter_read_stamp(iter, event);
3478
3479        iter->head += length;
3480
3481        /* check for end of page padding */
3482        if ((iter->head >= rb_page_size(iter->head_page)) &&
3483            (iter->head_page != cpu_buffer->commit_page))
3484                rb_inc_iter(iter);
3485}
3486
3487static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3488{
3489        return cpu_buffer->lost_events;
3490}
3491
3492static struct ring_buffer_event *
3493rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3494               unsigned long *lost_events)
3495{
3496        struct ring_buffer_event *event;
3497        struct buffer_page *reader;
3498        int nr_loops = 0;
3499
3500 again:
3501        /*
3502         * We repeat when a time extend is encountered.
3503         * Since the time extend is always attached to a data event,
3504         * we should never loop more than once.
3505         * (We never hit the following condition more than twice).
3506         */
3507        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3508                return NULL;
3509
3510        reader = rb_get_reader_page(cpu_buffer);
3511        if (!reader)
3512                return NULL;
3513
3514        event = rb_reader_event(cpu_buffer);
3515
3516        switch (event->type_len) {
3517        case RINGBUF_TYPE_PADDING:
3518                if (rb_null_event(event))
3519                        RB_WARN_ON(cpu_buffer, 1);
3520                /*
3521                 * Because the writer could be discarding every
3522                 * event it creates (which would probably be bad)
3523                 * if we were to go back to "again" then we may never
3524                 * catch up, and will trigger the warn on, or lock
3525                 * the box. Return the padding, and we will release
3526                 * the current locks, and try again.
3527                 */
3528                return event;
3529
3530        case RINGBUF_TYPE_TIME_EXTEND:
3531                /* Internal data, OK to advance */
3532                rb_advance_reader(cpu_buffer);
3533                goto again;
3534
3535        case RINGBUF_TYPE_TIME_STAMP:
3536                /* FIXME: not implemented */
3537                rb_advance_reader(cpu_buffer);
3538                goto again;
3539
3540        case RINGBUF_TYPE_DATA:
3541                if (ts) {
3542                        *ts = cpu_buffer->read_stamp + event->time_delta;
3543                        ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3544                                                         cpu_buffer->cpu, ts);
3545                }
3546                if (lost_events)
3547                        *lost_events = rb_lost_events(cpu_buffer);
3548                return event;
3549
3550        default:
3551                BUG();
3552        }
3553
3554        return NULL;
3555}
3556EXPORT_SYMBOL_GPL(ring_buffer_peek);
3557
3558static struct ring_buffer_event *
3559rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3560{
3561        struct ring_buffer *buffer;
3562        struct ring_buffer_per_cpu *cpu_buffer;
3563        struct ring_buffer_event *event;
3564        int nr_loops = 0;
3565
3566        cpu_buffer = iter->cpu_buffer;
3567        buffer = cpu_buffer->buffer;
3568
3569        /*
3570         * Check if someone performed a consuming read to
3571         * the buffer. A consuming read invalidates the iterator
3572         * and we need to reset the iterator in this case.
3573         */
3574        if (unlikely(iter->cache_read != cpu_buffer->read ||
3575                     iter->cache_reader_page != cpu_buffer->reader_page))
3576                rb_iter_reset(iter);
3577
3578 again:
3579        if (ring_buffer_iter_empty(iter))
3580                return NULL;
3581
3582        /*
3583         * We repeat when a time extend is encountered.
3584         * Since the time extend is always attached to a data event,
3585         * we should never loop more than once.
3586         * (We never hit the following condition more than twice).
3587         */
3588        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3589                return NULL;
3590
3591        if (rb_per_cpu_empty(cpu_buffer))
3592                return NULL;
3593
3594        if (iter->head >= local_read(&iter->head_page->page->commit)) {
3595                rb_inc_iter(iter);
3596                goto again;
3597        }
3598
3599        event = rb_iter_head_event(iter);
3600
3601        switch (event->type_len) {
3602        case RINGBUF_TYPE_PADDING:
3603                if (rb_null_event(event)) {
3604                        rb_inc_iter(iter);
3605                        goto again;
3606                }
3607                rb_advance_iter(iter);
3608                return event;
3609
3610        case RINGBUF_TYPE_TIME_EXTEND:
3611                /* Internal data, OK to advance */
3612                rb_advance_iter(iter);
3613                goto again;
3614
3615        case RINGBUF_TYPE_TIME_STAMP:
3616                /* FIXME: not implemented */
3617                rb_advance_iter(iter);
3618                goto again;
3619
3620        case RINGBUF_TYPE_DATA:
3621                if (ts) {
3622                        *ts = iter->read_stamp + event->time_delta;
3623                        ring_buffer_normalize_time_stamp(buffer,
3624                                                         cpu_buffer->cpu, ts);
3625                }
3626                return event;
3627
3628        default:
3629                BUG();
3630        }
3631
3632        return NULL;
3633}
3634EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3635
3636static inline int rb_ok_to_lock(void)
3637{
3638        /*
3639         * If an NMI die dumps out the content of the ring buffer
3640         * do not grab locks. We also permanently disable the ring
3641         * buffer. A one-time deal is all you get from reading
3642         * the ring buffer from an NMI.
3643         */
3644        if (likely(!in_nmi()))
3645                return 1;
3646
3647        tracing_off_permanent();
3648        return 0;
3649}
3650
3651/**
3652 * ring_buffer_peek - peek at the next event to be read
3653 * @buffer: The ring buffer to read
3654 * @cpu: The cpu to peek at
3655 * @ts: The timestamp counter of this event.
3656 * @lost_events: a variable to store if events were lost (may be NULL)
3657 *
3658 * This will return the event that will be read next, but does
3659 * not consume the data.
3660 */
3661struct ring_buffer_event *
3662ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3663                 unsigned long *lost_events)
3664{
3665        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3666        struct ring_buffer_event *event;
3667        unsigned long flags;
3668        int dolock;
3669
3670        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3671                return NULL;
3672
3673        dolock = rb_ok_to_lock();
3674 again:
3675        local_irq_save(flags);
3676        if (dolock)
3677                raw_spin_lock(&cpu_buffer->reader_lock);
3678        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3679        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3680                rb_advance_reader(cpu_buffer);
3681        if (dolock)
3682                raw_spin_unlock(&cpu_buffer->reader_lock);
3683        local_irq_restore(flags);
3684
3685        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3686                goto again;
3687
3688        return event;
3689}
3690
3691/**
3692 * ring_buffer_iter_peek - peek at the next event to be read
3693 * @iter: The ring buffer iterator
3694 * @ts: The timestamp counter of this event.
3695 *
3696 * This will return the event that will be read next, but does
3697 * not increment the iterator.
3698 */
3699struct ring_buffer_event *
3700ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3701{
3702        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3703        struct ring_buffer_event *event;
3704        unsigned long flags;
3705
3706 again:
3707        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3708        event = rb_iter_peek(iter, ts);
3709        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3710
3711        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3712                goto again;
3713
3714        return event;
3715}
3716
3717/**
3718 * ring_buffer_consume - return an event and consume it
3719 * @buffer: The ring buffer to get the next event from
3720 * @cpu: the cpu to read the buffer from
3721 * @ts: a variable to store the timestamp (may be NULL)
3722 * @lost_events: a variable to store if events were lost (may be NULL)
3723 *
3724 * Returns the next event in the ring buffer, and that event is consumed.
3725 * Meaning that sequential reads will keep returning a different event,
3726 * and eventually empty the ring buffer if the producer is slower.
3727 */
3728struct ring_buffer_event *
3729ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3730                    unsigned long *lost_events)
3731{
3732        struct ring_buffer_per_cpu *cpu_buffer;
3733        struct ring_buffer_event *event = NULL;
3734        unsigned long flags;
3735        int dolock;
3736
3737        dolock = rb_ok_to_lock();
3738
3739 again:
3740        /* might be called in atomic */
3741        preempt_disable();
3742
3743        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3744                goto out;
3745
3746        cpu_buffer = buffer->buffers[cpu];
3747        local_irq_save(flags);
3748        if (dolock)
3749                raw_spin_lock(&cpu_buffer->reader_lock);
3750
3751        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3752        if (event) {
3753                cpu_buffer->lost_events = 0;
3754                rb_advance_reader(cpu_buffer);
3755        }
3756
3757        if (dolock)
3758                raw_spin_unlock(&cpu_buffer->reader_lock);
3759        local_irq_restore(flags);
3760
3761 out:
3762        preempt_enable();
3763
3764        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3765                goto again;
3766
3767        return event;
3768}
3769EXPORT_SYMBOL_GPL(ring_buffer_consume);
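
/*
 * Example (a sketch, not part of this file): a consuming read loop
 * over one CPU buffer using ring_buffer_consume(). The helper name
 * and the process() callback are illustrative only; @ts is the event
 * time stamp and @lost the number of events dropped before it.
 *
 *	static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
 *				      void (*process)(void *data, unsigned len,
 *						      u64 ts, unsigned long lost))
 *	{
 *		struct ring_buffer_event *event;
 *		unsigned long lost;
 *		u64 ts;
 *
 *		while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *			process(ring_buffer_event_data(event),
 *				ring_buffer_event_length(event), ts, lost);
 *	}
 */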
3770
3771/**
3772 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3773 * @buffer: The ring buffer to read from
3774 * @cpu: The cpu buffer to iterate over
3775 *
3776 * This performs the initial preparations necessary to iterate
3777 * through the buffer.  Memory is allocated, buffer recording
3778 * is disabled, and the iterator pointer is returned to the caller.
3779 *
3780 * Disabling buffer recording prevents the reading from being
3781 * corrupted. This is not a consuming read, so a producer is not
3782 * expected.
3783 *
3784 * After a sequence of ring_buffer_read_prepare calls, the user is
3785 * expected to make at least one call to ring_buffer_prepare_sync.
3786 * Afterwards, ring_buffer_read_start is invoked to get things going
3787 * for real.
3788 *
3789 * This overall must be paired with ring_buffer_read_finish.
3790 */
3791struct ring_buffer_iter *
3792ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3793{
3794        struct ring_buffer_per_cpu *cpu_buffer;
3795        struct ring_buffer_iter *iter;
3796
3797        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3798                return NULL;
3799
3800        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3801        if (!iter)
3802                return NULL;
3803
3804        cpu_buffer = buffer->buffers[cpu];
3805
3806        iter->cpu_buffer = cpu_buffer;
3807
3808        atomic_inc(&buffer->resize_disabled);
3809        atomic_inc(&cpu_buffer->record_disabled);
3810
3811        return iter;
3812}
3813EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3814
3815/**
3816 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3817 *
3818 * All previously invoked ring_buffer_read_prepare calls to prepare
3819 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
3820 * calls on those iterators are allowed.
3821 */
3822void
3823ring_buffer_read_prepare_sync(void)
3824{
3825        synchronize_sched();
3826}
3827EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3828
3829/**
3830 * ring_buffer_read_start - start a non consuming read of the buffer
3831 * @iter: The iterator returned by ring_buffer_read_prepare
3832 *
3833 * This finalizes the startup of an iteration through the buffer.
3834 * The iterator comes from a call to ring_buffer_read_prepare and
3835 * an intervening ring_buffer_read_prepare_sync must have been
3836 * performed.
3837 *
3838 * Must be paired with ring_buffer_read_finish.
3839 */
3840void
3841ring_buffer_read_start(struct ring_buffer_iter *iter)
3842{
3843        struct ring_buffer_per_cpu *cpu_buffer;
3844        unsigned long flags;
3845
3846        if (!iter)
3847                return;
3848
3849        cpu_buffer = iter->cpu_buffer;
3850
3851        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3852        arch_spin_lock(&cpu_buffer->lock);
3853        rb_iter_reset(iter);
3854        arch_spin_unlock(&cpu_buffer->lock);
3855        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3856}
3857EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3858
3859/**
3860 * ring_buffer_read_finish - finish reading the iterator of the buffer
3861 * @iter: The iterator retrieved by ring_buffer_read_prepare
3862 *
3863 * This re-enables the recording to the buffer, and frees the
3864 * iterator.
3865 */
3866void
3867ring_buffer_read_finish(struct ring_buffer_iter *iter)
3868{
3869        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3870        unsigned long flags;
3871
3872        /*
3873         * Ring buffer is disabled from recording, here's a good place
3874         * to check the integrity of the ring buffer.
3875         * Must prevent readers from trying to read, as the check
3876         * clears the HEAD page and readers require it.
3877         */
3878        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3879        rb_check_pages(cpu_buffer);
3880        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3881
3882        atomic_dec(&cpu_buffer->record_disabled);
3883        atomic_dec(&cpu_buffer->buffer->resize_disabled);
3884        kfree(iter);
3885}
3886EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3887
3888/**
3889 * ring_buffer_read - read the next item in the ring buffer by the iterator
3890 * @iter: The ring buffer iterator
3891 * @ts: The time stamp of the event read.
3892 *
3893 * This reads the next event in the ring buffer and increments the iterator.
3894 */
3895struct ring_buffer_event *
3896ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3897{
3898        struct ring_buffer_event *event;
3899        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3900        unsigned long flags;
3901
3902        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3903 again:
3904        event = rb_iter_peek(iter, ts);
3905        if (!event)
3906                goto out;
3907
3908        if (event->type_len == RINGBUF_TYPE_PADDING)
3909                goto again;
3910
3911        rb_advance_iter(iter);
3912 out:
3913        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3914
3915        return event;
3916}
3917EXPORT_SYMBOL_GPL(ring_buffer_read);
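
/*
 * Example (a sketch, not part of this file): the full non consuming
 * sequence documented above, tied together in one place. The helper
 * name and the visit() callback are illustrative only.
 *
 *	static void example_iterate_cpu(struct ring_buffer *buffer, int cpu,
 *					void (*visit)(void *data, unsigned len, u64 ts))
 *	{
 *		struct ring_buffer_iter *iter;
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		iter = ring_buffer_read_prepare(buffer, cpu);
 *		if (!iter)
 *			return;
 *		ring_buffer_read_prepare_sync();
 *		ring_buffer_read_start(iter);
 *
 *		while ((event = ring_buffer_read(iter, &ts)))
 *			visit(ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event), ts);
 *
 *		ring_buffer_read_finish(iter);
 *	}
 */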
3918
3919/**
3920 * ring_buffer_size - return the size of the ring buffer (in bytes)
3921 * @buffer: The ring buffer.
     * @cpu: The per CPU buffer to get the size of.
3922 */
3923unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
3924{
3925        /*
3926         * Earlier, this method returned
3927         *      BUF_PAGE_SIZE * buffer->nr_pages
3928         * Since the nr_pages field is now removed, we have converted this to
3929         * return the per cpu buffer value.
3930         */
3931        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3932                return 0;
3933
3934        return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
3935}
3936EXPORT_SYMBOL_GPL(ring_buffer_size);
3937
3938static void
3939rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3940{
3941        rb_head_page_deactivate(cpu_buffer);
3942
3943        cpu_buffer->head_page
3944                = list_entry(cpu_buffer->pages, struct buffer_page, list);
3945        local_set(&cpu_buffer->head_page->write, 0);
3946        local_set(&cpu_buffer->head_page->entries, 0);
3947        local_set(&cpu_buffer->head_page->page->commit, 0);
3948
3949        cpu_buffer->head_page->read = 0;
3950
3951        cpu_buffer->tail_page = cpu_buffer->head_page;
3952        cpu_buffer->commit_page = cpu_buffer->head_page;
3953
3954        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3955        INIT_LIST_HEAD(&cpu_buffer->new_pages);
3956        local_set(&cpu_buffer->reader_page->write, 0);
3957        local_set(&cpu_buffer->reader_page->entries, 0);
3958        local_set(&cpu_buffer->reader_page->page->commit, 0);
3959        cpu_buffer->reader_page->read = 0;
3960
3961        local_set(&cpu_buffer->entries_bytes, 0);
3962        local_set(&cpu_buffer->overrun, 0);
3963        local_set(&cpu_buffer->commit_overrun, 0);
3964        local_set(&cpu_buffer->dropped_events, 0);
3965        local_set(&cpu_buffer->entries, 0);
3966        local_set(&cpu_buffer->committing, 0);
3967        local_set(&cpu_buffer->commits, 0);
3968        cpu_buffer->read = 0;
3969        cpu_buffer->read_bytes = 0;
3970
3971        cpu_buffer->write_stamp = 0;
3972        cpu_buffer->read_stamp = 0;
3973
3974        cpu_buffer->lost_events = 0;
3975        cpu_buffer->last_overrun = 0;
3976
3977        rb_head_page_activate(cpu_buffer);
3978}
3979
3980/**
3981 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3982 * @buffer: The ring buffer to reset a per cpu buffer of
3983 * @cpu: The CPU buffer to be reset
3984 */
3985void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3986{
3987        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3988        unsigned long flags;
3989
3990        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3991                return;
3992
3993        atomic_inc(&buffer->resize_disabled);
3994        atomic_inc(&cpu_buffer->record_disabled);
3995
3996        /* Make sure all commits have finished */
3997        synchronize_sched();
3998
3999        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4000
4001        if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4002                goto out;
4003
4004        arch_spin_lock(&cpu_buffer->lock);
4005
4006        rb_reset_cpu(cpu_buffer);
4007
4008        arch_spin_unlock(&cpu_buffer->lock);
4009
4010 out:
4011        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4012
4013        atomic_dec(&cpu_buffer->record_disabled);
4014        atomic_dec(&buffer->resize_disabled);
4015}
4016EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4017
4018/**
4019 * ring_buffer_reset - reset a ring buffer
4020 * @buffer: The ring buffer to reset all cpu buffers
4021 */
4022void ring_buffer_reset(struct ring_buffer *buffer)
4023{
4024        int cpu;
4025
4026        for_each_buffer_cpu(buffer, cpu)
4027                ring_buffer_reset_cpu(buffer, cpu);
4028}
4029EXPORT_SYMBOL_GPL(ring_buffer_reset);
4030
4031/**
4032 * ring_buffer_empty - is the ring buffer empty?
4033 * @buffer: The ring buffer to test
4034 */
4035int ring_buffer_empty(struct ring_buffer *buffer)
4036{
4037        struct ring_buffer_per_cpu *cpu_buffer;
4038        unsigned long flags;
4039        int dolock;
4040        int cpu;
4041        int ret;
4042
4043        dolock = rb_ok_to_lock();
4044
4045        /* yes this is racy, but if you don't like the race, lock the buffer */
4046        for_each_buffer_cpu(buffer, cpu) {
4047                cpu_buffer = buffer->buffers[cpu];
4048                local_irq_save(flags);
4049                if (dolock)
4050                        raw_spin_lock(&cpu_buffer->reader_lock);
4051                ret = rb_per_cpu_empty(cpu_buffer);
4052                if (dolock)
4053                        raw_spin_unlock(&cpu_buffer->reader_lock);
4054                local_irq_restore(flags);
4055
4056                if (!ret)
4057                        return 0;
4058        }
4059
4060        return 1;
4061}
4062EXPORT_SYMBOL_GPL(ring_buffer_empty);
4063
4064/**
4065 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4066 * @buffer: The ring buffer
4067 * @cpu: The CPU buffer to test
4068 */
4069int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4070{
4071        struct ring_buffer_per_cpu *cpu_buffer;
4072        unsigned long flags;
4073        int dolock;
4074        int ret;
4075
4076        if (!cpumask_test_cpu(cpu, buffer->cpumask))
4077                return 1;
4078
4079        dolock = rb_ok_to_lock();
4080
4081        cpu_buffer = buffer->buffers[cpu];
4082        local_irq_save(flags);
4083        if (dolock)
4084                raw_spin_lock(&cpu_buffer->reader_lock);
4085        ret = rb_per_cpu_empty(cpu_buffer);
4086        if (dolock)
4087                raw_spin_unlock(&cpu_buffer->reader_lock);
4088        local_irq_restore(flags);
4089
4090        return ret;
4091}
4092EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
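
/*
 * Example (a sketch, not part of this file): the empty checks are a
 * cheap way for a reader to decide whether there is anything to do
 * before taking locks or scheduling work. A negative cpu meaning
 * "any CPU" is a convention of this sketch only.
 *
 *	static int example_work_pending(struct ring_buffer *buffer, int cpu)
 *	{
 *		if (cpu < 0)
 *			return !ring_buffer_empty(buffer);
 *		return !ring_buffer_empty_cpu(buffer, cpu);
 *	}
 */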
4093
4094#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4095/**
4096 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4097 * @buffer_a: One buffer to swap with
4098 * @buffer_b: The other buffer to swap with
4099 *
4100 * This function is useful for tracers that want to take a "snapshot"
4101 * of a CPU buffer and has another back up buffer lying around.
4102 * it is expected that the tracer handles the cpu buffer not being
4103 * used at the moment.
4104 */
4105int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4106                         struct ring_buffer *buffer_b, int cpu)
4107{
4108        struct ring_buffer_per_cpu *cpu_buffer_a;
4109        struct ring_buffer_per_cpu *cpu_buffer_b;
4110        int ret = -EINVAL;
4111
4112        if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4113            !cpumask_test_cpu(cpu, buffer_b->cpumask))
4114                goto out;
4115
4116        cpu_buffer_a = buffer_a->buffers[cpu];
4117        cpu_buffer_b = buffer_b->buffers[cpu];
4118
4119        /* At least make sure the two buffers are somewhat the same */
4120        if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4121                goto out;
4122
4123        ret = -EAGAIN;
4124
4125        if (ring_buffer_flags != RB_BUFFERS_ON)
4126                goto out;
4127
4128        if (atomic_read(&buffer_a->record_disabled))
4129                goto out;
4130
4131        if (atomic_read(&buffer_b->record_disabled))
4132                goto out;
4133
4134        if (atomic_read(&cpu_buffer_a->record_disabled))
4135                goto out;
4136
4137        if (atomic_read(&cpu_buffer_b->record_disabled))
4138                goto out;
4139
4140        /*
4141         * We can't do a synchronize_sched here because this
4142         * function can be called in atomic context.
4143         * Normally this will be called from the same CPU as cpu.
4144         * If not it's up to the caller to protect this.
4145         */
4146        atomic_inc(&cpu_buffer_a->record_disabled);
4147        atomic_inc(&cpu_buffer_b->record_disabled);
4148
4149        ret = -EBUSY;
4150        if (local_read(&cpu_buffer_a->committing))
4151                goto out_dec;
4152        if (local_read(&cpu_buffer_b->committing))
4153                goto out_dec;
4154
4155        buffer_a->buffers[cpu] = cpu_buffer_b;
4156        buffer_b->buffers[cpu] = cpu_buffer_a;
4157
4158        cpu_buffer_b->buffer = buffer_a;
4159        cpu_buffer_a->buffer = buffer_b;
4160
4161        ret = 0;
4162
4163out_dec:
4164        atomic_dec(&cpu_buffer_a->record_disabled);
4165        atomic_dec(&cpu_buffer_b->record_disabled);
4166out:
4167        return ret;
4168}
4169EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
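
/*
 * Example (a sketch, not part of this file): the "snapshot" pattern
 * the comment above refers to, assuming the caller keeps a spare
 * buffer of the same size. All names here are illustrative only.
 *
 *	static int example_snapshot_cpu(struct ring_buffer *trace_buf,
 *					struct ring_buffer *spare_buf, int cpu)
 *	{
 *		int ret;
 *
 *		ret = ring_buffer_swap_cpu(spare_buf, trace_buf, cpu);
 *		if (ret)
 *			pr_warn("cpu%d snapshot swap failed: %d\n", cpu, ret);
 *		return ret;
 *	}
 */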
4170#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4171
4172/**
4173 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4174 * @buffer: the buffer to allocate for.
     * @cpu: the cpu buffer to allocate for.
4175 *
4176 * This function is used in conjunction with ring_buffer_read_page.
4177 * When reading a full page from the ring buffer, these functions
4178 * can be used to speed up the process. The calling function should
4179 * allocate a few pages first with this function. Then when it
4180 * needs to get pages from the ring buffer, it passes the result
4181 * of this function into ring_buffer_read_page, which will swap
4182 * the page that was allocated, with the read page of the buffer.
4183 *
4184 * Returns:
4185 *  The page allocated, or NULL on error.
4186 */
4187void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4188{
4189        struct buffer_data_page *bpage;
4190        struct page *page;
4191
4192        page = alloc_pages_node(cpu_to_node(cpu),
4193                                GFP_KERNEL | __GFP_NORETRY, 0);
4194        if (!page)
4195                return NULL;
4196
4197        bpage = page_address(page);
4198
4199        rb_init_page(bpage);
4200
4201        return bpage;
4202}
4203EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4204
4205/**
4206 * ring_buffer_free_read_page - free an allocated read page
4207 * @buffer: the buffer the page was allocated for
4208 * @data: the page to free
4209 *
4210 * Free a page allocated from ring_buffer_alloc_read_page.
4211 */
4212void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4213{
4214        free_page((unsigned long)data);
4215}
4216EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4217
4218/**
4219 * ring_buffer_read_page - extract a page from the ring buffer
4220 * @buffer: buffer to extract from
4221 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4222 * @len: amount to extract
4223 * @cpu: the cpu of the buffer to extract
4224 * @full: should the extraction only happen when the page is full.
4225 *
4226 * This function will pull out a page from the ring buffer and consume it.
4227 * @data_page must be the address of the variable that was returned
4228 * from ring_buffer_alloc_read_page. This is because the page might be used
4229 * to swap with a page in the ring buffer.
4230 *
4231 * for example:
4232 *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
4233 *      if (!rpage)
4234 *              return error;
4235 *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4236 *      if (ret >= 0)
4237 *              process_page(rpage, ret);
4238 *
4239 * When @full is set, the function will not succeed unless
4240 * the writer is off the reader page.
4241 *
4242 * Note: it is up to the calling functions to handle sleeps and wakeups.
4243 *  The ring buffer can be used anywhere in the kernel and can not
4244 *  blindly call wake_up. The layer that uses the ring buffer must be
4245 *  responsible for that.
4246 *
4247 * Returns:
4248 *  >=0 if data has been transferred, returns the offset of consumed data.
4249 *  <0 if no data has been transferred.
4250 */
4251int ring_buffer_read_page(struct ring_buffer *buffer,
4252                          void **data_page, size_t len, int cpu, int full)
4253{
4254        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4255        struct ring_buffer_event *event;
4256        struct buffer_data_page *bpage;
4257        struct buffer_page *reader;
4258        unsigned long missed_events;
4259        unsigned long flags;
4260        unsigned int commit;
4261        unsigned int read;
4262        u64 save_timestamp;
4263        int ret = -1;
4264
4265        if (!cpumask_test_cpu(cpu, buffer->cpumask))
4266                goto out;
4267
4268        /*
4269         * If len is not big enough to hold the page header, then
4270         * we can not copy anything.
4271         */
4272        if (len <= BUF_PAGE_HDR_SIZE)
4273                goto out;
4274
4275        len -= BUF_PAGE_HDR_SIZE;
4276
4277        if (!data_page)
4278                goto out;
4279
4280        bpage = *data_page;
4281        if (!bpage)
4282                goto out;
4283
4284        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4285
4286        reader = rb_get_reader_page(cpu_buffer);
4287        if (!reader)
4288                goto out_unlock;
4289
4290        event = rb_reader_event(cpu_buffer);
4291
4292        read = reader->read;
4293        commit = rb_page_commit(reader);
4294
4295        /* Check if any events were dropped */
4296        missed_events = cpu_buffer->lost_events;
4297
4298        /*
4299         * If this page has been partially read or
4300         * if len is not big enough to read the rest of the page or
4301         * a writer is still on the page, then
4302         * we must copy the data from the page to the buffer.
4303         * Otherwise, we can simply swap the page with the one passed in.
4304         */
4305        if (read || (len < (commit - read)) ||
4306            cpu_buffer->reader_page == cpu_buffer->commit_page) {
4307                struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4308                unsigned int rpos = read;
4309                unsigned int pos = 0;
4310                unsigned int size;
4311
4312                if (full)
4313                        goto out_unlock;
4314
4315                if (len > (commit - read))
4316                        len = (commit - read);
4317
4318                /* Always keep the time extend and data together */
4319                size = rb_event_ts_length(event);
4320
4321                if (len < size)
4322                        goto out_unlock;
4323
4324                /* save the current timestamp, since the user will need it */
4325                save_timestamp = cpu_buffer->read_stamp;
4326
4327                /* Need to copy one event at a time */
4328                do {
4329                        /* We need the size of one event, because
4330                         * rb_advance_reader only advances by one event,
4331                         * whereas rb_event_ts_length may include the size of
4332                         * one or two events.
4333                         * We have already ensured there's enough space if this
4334                         * is a time extend. */
4335                        size = rb_event_length(event);
4336                        memcpy(bpage->data + pos, rpage->data + rpos, size);
4337
4338                        len -= size;
4339
4340                        rb_advance_reader(cpu_buffer);
4341                        rpos = reader->read;
4342                        pos += size;
4343
4344                        if (rpos >= commit)
4345                                break;
4346
4347                        event = rb_reader_event(cpu_buffer);
4348                        /* Always keep the time extend and data together */
4349                        size = rb_event_ts_length(event);
4350                } while (len >= size);
4351
4352                /* update bpage */
4353                local_set(&bpage->commit, pos);
4354                bpage->time_stamp = save_timestamp;
4355
4356                /* we copied everything to the beginning */
4357                read = 0;
4358        } else {
4359                /* update the entry counter */
4360                cpu_buffer->read += rb_page_entries(reader);
4361                cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4362
4363                /* swap the pages */
4364                rb_init_page(bpage);
4365                bpage = reader->page;
4366                reader->page = *data_page;
4367                local_set(&reader->write, 0);
4368                local_set(&reader->entries, 0);
4369                reader->read = 0;
4370                *data_page = bpage;
4371
4372                /*
4373                 * Use the real_end for the data size,
4374                 * This gives us a chance to store the lost events
4375                 * on the page.
4376                 */
4377                if (reader->real_end)
4378                        local_set(&bpage->commit, reader->real_end);
4379        }
4380        ret = read;
4381
4382        cpu_buffer->lost_events = 0;
4383
4384        commit = local_read(&bpage->commit);
4385        /*
4386         * Set a flag in the commit field if we lost events
4387         */
4388        if (missed_events) {
4389                /* If there is room at the end of the page to save the
4390                 * missed events, then record it there.
4391                 */
4392                if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4393                        memcpy(&bpage->data[commit], &missed_events,
4394                               sizeof(missed_events));
4395                        local_add(RB_MISSED_STORED, &bpage->commit);
4396                        commit += sizeof(missed_events);
4397                }
4398                local_add(RB_MISSED_EVENTS, &bpage->commit);
4399        }
4400
4401        /*
4402         * This page may be off to user land. Zero it out here.
4403         */
4404        if (commit < BUF_PAGE_SIZE)
4405                memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4406
4407 out_unlock:
4408        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4409
4410 out:
4411        return ret;
4412}
4413EXPORT_SYMBOL_GPL(ring_buffer_read_page);
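
/*
 * Example (a sketch, not part of this file): reading one full page as
 * described in the comment above, using PAGE_SIZE for @len. The helper
 * name and the consume_page() callback are illustrative only.
 *
 *	static int example_read_one_page(struct ring_buffer *buffer, int cpu,
 *					 void (*consume_page)(void *page, int offset))
 *	{
 *		void *page;
 *		int ret;
 *
 *		page = ring_buffer_alloc_read_page(buffer, cpu);
 *		if (!page)
 *			return -ENOMEM;
 *
 *		ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *		if (ret >= 0)
 *			consume_page(page, ret);
 *
 *		ring_buffer_free_read_page(buffer, page);
 *		return ret;
 *	}
 */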
4414
4415#ifdef CONFIG_HOTPLUG_CPU
4416static int rb_cpu_notify(struct notifier_block *self,
4417                         unsigned long action, void *hcpu)
4418{
4419        struct ring_buffer *buffer =
4420                container_of(self, struct ring_buffer, cpu_notify);
4421        long cpu = (long)hcpu;
4422        int cpu_i, nr_pages_same;
4423        unsigned int nr_pages;
4424
4425        switch (action) {
4426        case CPU_UP_PREPARE:
4427        case CPU_UP_PREPARE_FROZEN:
4428                if (cpumask_test_cpu(cpu, buffer->cpumask))
4429                        return NOTIFY_OK;
4430
4431                nr_pages = 0;
4432                nr_pages_same = 1;
4433                /* check if all cpu sizes are same */
4434                for_each_buffer_cpu(buffer, cpu_i) {
4435                        /* fill in the size from first enabled cpu */
4436                        if (nr_pages == 0)
4437                                nr_pages = buffer->buffers[cpu_i]->nr_pages;
4438                        if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4439                                nr_pages_same = 0;
4440                                break;
4441                        }
4442                }
4443                /* allocate minimum pages, user can later expand it */
4444                if (!nr_pages_same)
4445                        nr_pages = 2;
4446                buffer->buffers[cpu] =
4447                        rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4448                if (!buffer->buffers[cpu]) {
4449                        WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4450                             cpu);
4451                        return NOTIFY_OK;
4452                }
4453                smp_wmb();
4454                cpumask_set_cpu(cpu, buffer->cpumask);
4455                break;
4456        case CPU_DOWN_PREPARE:
4457        case CPU_DOWN_PREPARE_FROZEN:
4458                /*
4459                 * Do nothing.
4460                 *  If we were to free the buffer, then the user would
4461                 *  lose any trace that was in the buffer.
4462                 */
4463                break;
4464        default:
4465                break;
4466        }
4467        return NOTIFY_OK;
4468}
4469#endif
4470