linux/drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        /*
         * Tell the host not to interrupt us while we drain the ring;
         * the barrier makes the mask visible before we start reading.
         */
        rbi->ring_buffer->interrupt_mask = 1;
        smp_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        smp_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}
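
/*
 * Illustration only (not part of this file): a minimal sketch of how a
 * consumer might pair hv_begin_read()/hv_end_read() so that a packet
 * arriving between the last read and the unmasking is not lost. The
 * helper drain_all_packets() and the channel variable are hypothetical.
 *
 *	hv_begin_read(rbi);
 *	for (;;) {
 *		drain_all_packets(channel);
 *		if (hv_end_read(rbi) == 0)
 *			break;
 *		hv_begin_read(rbi);
 *	}
 *
 * A non-zero return from hv_end_read() means data raced in while
 * interrupts were masked, so the loop masks again and re-drains
 * before the host is allowed to interrupt.
 */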

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        /*
         * Make sure the updated write index is visible before we
         * sample interrupt_mask; otherwise we could miss the window
         * in which the host unmasked interrupts.
         */
        smp_mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty. The old
         * write index equal to the current read index means the
         * reader had caught up before this write.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}

/*
 * To optimize flow management on the send side, the consumer of
 * the ring buffer can signal the producer when the producer is
 * blocked for lack of sufficient space in the ring buffer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: the size in bytes that the producer is
 *    trying to send.
 * 2. The feature bit feat_pending_send_sz, which is set to indicate
 *    that the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
                                         struct hv_ring_buffer_info *rbi)
{
        u32 prev_write_sz;
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc = rbi->ring_buffer->write_index;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz = rbi->ring_buffer->pending_send_sz;

        /*
         * If the other end is not blocked on write don't bother.
         */
        if (pending_sz == 0)
                return false;

        /*
         * Free space is the distance from the write index forward to
         * the read index, modulo the size of the data area; compute
         * it both before (old_rd) and after (read_loc) this read.
         */
        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;

        prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
                        old_rd - write_loc;

        /* Signal only on the transition across the pending_send_sz mark. */
        if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
                return true;

        return false;
}
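
/*
 * Worked example (illustration only) of the arithmetic above, with
 * ring_datasize = 4096, write_index = 100 and pending_send_sz = 200:
 *
 *	read index before the read = 150 -> prev_write_sz = 150 - 100 =  50
 *	read index after the read  = 350 -> cur_write_sz  = 350 - 100 = 250
 *
 * Since 50 < 200 <= 250, the free space crossed the pending_send_sz
 * mark during this read, so the blocked producer is signaled exactly
 * once rather than on every subsequent read.
 */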

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                     u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already examined
 * (such as the packet descriptor) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                 u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}
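
/*
 * Example (illustration only): with ring_datasize = 4096, read_index =
 * 4090 and offset = 16, the result is (4090 + 16) % 4096 = 10; the
 * skip wraps cleanly past the end of the data area.
 */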

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                    u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the write index of the specified ring buffer, packed into the
 * upper 32 bits of a u64 (e.g. write_index 0x20 yields
 * 0x0000002000000000); the lower 32 bits are zero.
 * hv_ringbuffer_write() appends this value to each packet to record
 * where the packet started.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around on the
 * source (ring) side only.
 *
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}


/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room; handles wrap-around on the
 * destination (ring) side only.
 *
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        void                            *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}
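
/*
 * Worked example (illustration only) of the wrap-around case above:
 * with ring_buffer_size = 4096, start_write_offset = 4000 and
 * srclen = 200, frag_len = 96. Bytes 0..95 of src land at offsets
 * 4000..4095, bytes 96..199 land at offsets 0..103, and the returned
 * offset is (4000 + 200) % 4096 = 104. hv_copyfrom_ringbuffer()
 * mirrors the same split on the read side.
 */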

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                            struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                        &bytes_avail_toread,
                                        &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                   void *buffer, u32 buflen)
{
        /* The ring header is expected to occupy exactly one page. */
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
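
/*
 * Illustration only: a minimal sketch of how a caller might set up a
 * ring buffer with hv_ringbuffer_init(). The allocation, the order
 * variable and the error handling are assumptions, not taken from this
 * file; the real VMBus channel setup lives in channel.c.
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, order);
 *	int ret;
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	ret = hv_ringbuffer_init(&rbi, ring, (1 << order) * PAGE_SIZE);
 *
 * The first page of the buffer holds the struct hv_ring_buffer header
 * (read/write indices, interrupt_mask), which is why ring_datasize is
 * buflen minus sizeof(struct hv_ring_buffer).
 */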

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                    struct scatterlist *sglist, u32 sgcount, bool *signal)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        struct scatterlist *sg;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i)
                totalbytes_towrite += sg->length;

        /* Reserve room for the 64-bit previous-indices trailer. */
        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);

        /*
         * If there is only room for the packet, treat the ring as
         * full: allowing read index == write index would make a
         * completely full ring indistinguishable from an empty one
         * the next time around.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for_each_sg(sglist, sg, sgcount, i)
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                     next_write_location,
                                                     sg_virt(sg),
                                                     sg->length);

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                             next_write_location,
                                             &prev_indices,
                                             sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        smp_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
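
/*
 * Illustration only: a minimal sketch of a sender built on
 * hv_ringbuffer_write(). The payload variables and the notify_host()
 * hook are assumptions; real callers in channel.c prepend a packet
 * descriptor to the scatterlist and signal the host through the
 * channel's event mechanism (e.g. vmbus_setevent()).
 *
 *	struct scatterlist sg;
 *	bool signal = false;
 *	int ret;
 *
 *	sg_init_one(&sg, payload, payload_len);
 *	ret = hv_ringbuffer_write(&outbound, &sg, 1, &signal);
 *	if (ret == -EAGAIN)
 *		return ret;		- ring full, retry later
 *	if (signal)
 *		notify_host();		- hypothetical helper
 */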


/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
                   void *buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(inring_info);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                buffer,
                                                buflen,
                                                next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                   u32 buflen, u32 offset, bool *signal)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
        u32 old_read;

        if (buflen == 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);

        /*
         * Remember the read index before this read advances it;
         * hv_need_to_signal_on_read() compares the free space before
         * and after the read, so it needs an index, not a byte count.
         */
        old_read = hv_get_next_read_location(inring_info);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                buffer,
                                                buflen,
                                                next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                &prev_indices,
                                                sizeof(u64),
                                                next_read_location);

        /*
         * Make sure all reads are done before we update the read index,
         * since the writer may start writing to the read area once the
         * read index is updated.
         */
        smp_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        *signal = hv_need_to_signal_on_read(old_read, inring_info);

        return 0;
}

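/*
 * Illustration only: the peek-then-read pattern the VMBus channel code
 * layers on top of the two routines above. The descriptor handling is
 * simplified; struct vmpacket_descriptor (with len8 counted in 8-byte
 * units) comes from include/linux/hyperv.h, and notify_host() is a
 * hypothetical stand-in for the real signaling path.
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal = false;
 *
 *	if (hv_ringbuffer_peek(&inbound, &desc, sizeof(desc)))
 *		return;			- nothing to read yet
 *	hv_ringbuffer_read(&inbound, buf, desc.len8 << 3, 0, &signal);
 *	if (signal)
 *		notify_host();
 *
 * Peeking the fixed-size descriptor first tells the consumer how large
 * the full packet is before it commits to advancing the read index.
 */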