linux/drivers/hv/channel.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

/* Internal routines */
static int create_gpadl_header(
        void *kbuffer,  /* must be phys and virt contiguous */
        u32 size,       /* page-size multiple */
        struct vmbus_channel_msginfo **msginfo,
        u32 *messagecount);
static void vmbus_setevent(struct vmbus_channel *channel);

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        if (channel->offermsg.monitor_allocated) {
                /* Each u32 represents 32 channels */
                sync_set_bit(channel->offermsg.child_relid & 31,
                        (unsigned long *) vmbus_connection.send_int_page +
                        (channel->offermsg.child_relid >> 5));

                monitorpage = vmbus_connection.monitor_pages;
                monitorpage++; /* Get the child to parent monitor page */

                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);

        } else {
                vmbus_set_event(channel->offermsg.child_relid);
        }
}

/*
 * vmbus_get_debug_info - Retrieve various channel debug info
 */
void vmbus_get_debug_info(struct vmbus_channel *channel,
                              struct vmbus_channel_debug_info *debuginfo)
{
        struct hv_monitor_page *monitorpage;
        u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
        u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;

        debuginfo->relid = channel->offermsg.child_relid;
        debuginfo->state = channel->state;
        memcpy(&debuginfo->interfacetype,
               &channel->offermsg.offer.if_type, sizeof(uuid_le));
        memcpy(&debuginfo->interface_instance,
               &channel->offermsg.offer.if_instance,
               sizeof(uuid_le));

        monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;

        debuginfo->monitorid = channel->offermsg.monitorid;

        debuginfo->servermonitor_pending =
                        monitorpage->trigger_group[monitor_group].pending;
        debuginfo->servermonitor_latency =
                        monitorpage->latency[monitor_group][monitor_offset];
        debuginfo->servermonitor_connectionid =
                        monitorpage->parameter[monitor_group]
                                        [monitor_offset].connectionid.u.id;

        monitorpage++;

        debuginfo->clientmonitor_pending =
                        monitorpage->trigger_group[monitor_group].pending;
        debuginfo->clientmonitor_latency =
                        monitorpage->latency[monitor_group][monitor_offset];
        debuginfo->clientmonitor_connectionid =
                        monitorpage->parameter[monitor_group]
                                        [monitor_offset].connectionid.u.id;

        hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
        hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
                     u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
                     void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        void *in, *out;
        unsigned long flags;
        int ret, t, err = 0;

        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        /* Allocate the ring buffer */
        out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                get_order(send_ringbuffer_size + recv_ringbuffer_size));

        if (!out)
                return -ENOMEM;


        in = (void *)((unsigned long)out + send_ringbuffer_size);

        newchannel->ringbuffer_pages = out;
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                           recv_ringbuffer_size) >> PAGE_SHIFT;

        ret = hv_ringbuffer_init(
                &newchannel->outbound, out, send_ringbuffer_size);

        if (ret != 0) {
                err = ret;
                goto error0;
        }

        ret = hv_ringbuffer_init(
                &newchannel->inbound, in, recv_ringbuffer_size);
        if (ret != 0) {
                err = ret;
                goto error0;
        }


        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        ret = vmbus_establish_gpadl(newchannel,
                                         newchannel->outbound.ring_buffer,
                                         send_ringbuffer_size +
                                         recv_ringbuffer_size,
                                         &newchannel->ringbuffer_gpadlhandle);

        if (ret != 0) {
                err = ret;
                goto error0;
        }

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                           sizeof(struct vmbus_channel_open_channel),
                           GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error0;
        }

        init_completion(&open_info->waitevent);

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                  PAGE_SHIFT;
        open_msg->server_contextarea_gpadlhandle = 0;

        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
                goto error0;
        }

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(open_msg,
                               sizeof(struct vmbus_channel_open_channel));

        if (ret != 0)
                goto error1;

        t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
        if (t == 0) {
                err = -ETIMEDOUT;
                goto error1;
        }


        if (open_info->response.open_result.status)
                err = open_info->response.open_result.status;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(open_info);
        return err;

error1:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error0:
        free_pages((unsigned long)out,
                get_order(send_ringbuffer_size + recv_ringbuffer_size));
        kfree(open_info);
        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
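
/*
 * Usage sketch (illustrative only): a typical caller invokes vmbus_open()
 * from its probe path with page-multiple ring-buffer sizes and a callback
 * that drains the inbound ring.  The example_* names and the 4-page ring
 * sizes below are hypothetical, not part of this driver.
 */
static void example_onchannel_callback(void *context)
{
        struct vmbus_channel *channel = context;

        /* A real driver would read packets from channel->inbound here. */
        (void)channel;
}

static int example_open_channel(struct vmbus_channel *channel)
{
        /* 4-page send and receive rings, no user-defined open data. */
        return vmbus_open(channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
                          NULL, 0, example_onchannel_callback, channel);
}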

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                                         struct vmbus_channel_msginfo **msginfo,
                                         u32 *messagecount)
{
        int i;
        int pagecount;
        unsigned long long pfn;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;

        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;
        pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;

        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);

        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (!msgheader)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = pfn+i;
                *msginfo = msgheader;
                *messagecount = 1;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        (*messagecount)++;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32 and we are using a pointer which could
                         * be 64-bit.  This is governed by the guest/host
                         * protocol and so the hypervisor guarantees that
                         * this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = pfn + pfnsum + i;

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = pfn+i;

                *msginfo = msgheader;
                *messagecount = 1;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc
 * @size: page-size multiple
 * @gpadl_handle: the handle for the newly established GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                               u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo;
        u32 msgcount;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;
        int t;

        next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
        atomic_inc(&vmbus_connection.next_gpadl_handle);

        ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;


        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);

        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                               sizeof(*msginfo));
        if (ret != 0)
                goto cleanup;

        if (msgcount > 1) {
                list_for_each(curr, &msginfo->submsglist) {

                        submsginfo = (struct vmbus_channel_msginfo *)curr;
                        gpadl_body =
                             (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                        gpadl_body->header.msgtype =
                                CHANNELMSG_GPADL_BODY;
                        gpadl_body->gpadl = next_gpadl_handle;

                        ret = vmbus_post_msg(gpadl_body,
                                               submsginfo->msgsize -
                                               sizeof(*submsginfo));
                        if (ret != 0)
                                goto cleanup;

                }
        }
        t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
        BUG_ON(t == 0);


        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
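
/*
 * Usage sketch (illustrative only): describing a physically contiguous,
 * page-multiple buffer to the host as a GPADL and tearing it down again.
 * example_map_buffer() is hypothetical; the buffer requirements follow the
 * kbuffer comments on create_gpadl_header() above.
 */
static int example_map_buffer(struct vmbus_channel *channel)
{
        void *buf;
        u32 gpadl = 0;
        int ret;

        /* One page, physically and virtually contiguous. */
        buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!buf)
                return -ENOMEM;

        ret = vmbus_establish_gpadl(channel, buf, PAGE_SIZE, &gpadl);
        if (ret) {
                free_page((unsigned long)buf);
                return ret;
        }

        /* ... hand 'gpadl' to the host in a device-specific message ... */

        vmbus_teardown_gpadl(channel, gpadl);
        free_page((unsigned long)buf);
        return 0;
}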

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret, t;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        ret = vmbus_post_msg(msg,
                               sizeof(struct vmbus_channel_gpadl_teardown));

        BUG_ON(ret != 0);
        t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
        BUG_ON(t == 0);

        /* Received a teardown response */
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        int ret;
        unsigned long flags;

        /* Stop callback and cancel the timer asap */
        spin_lock_irqsave(&channel->inbound_lock, flags);
        channel->onchannel_callback = NULL;
        spin_unlock_irqrestore(&channel->inbound_lock, flags);

        /* Send a closing message */

        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

        BUG_ON(ret != 0);
        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ringbuffer_gpadlhandle)
                vmbus_teardown_gpadl(channel,
                                          channel->ringbuffer_gpadlhandle);

        /* Cleanup the ring buffers for this channel */
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        free_pages((unsigned long)channel->ringbuffer_pages,
                get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Size of the data in the buffer, in bytes
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent, e.g. negotiate, time
 * packet etc.
 * @flags: Flags associated with the packet, e.g.
 * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
                           u32 bufferlen, u64 requestid,
                           enum vmbus_packet_type type, u32 flags)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct scatterlist bufferlist[3];
        u64 aligned_data = 0;
        int ret;


        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-byte granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        sg_init_table(bufferlist, 3);
        sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
        sg_set_buf(&bufferlist[1], buffer, bufferlen);
        sg_set_buf(&bufferlist[2], &aligned_data,
                   packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);

        if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket);
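
/*
 * Usage sketch (illustrative only): sending a small in-band request with a
 * completion requested.  The example_request layout and the request id are
 * hypothetical; VM_PKT_DATA_INBAND and the completion flag come from
 * <linux/hyperv.h>.
 */
static int example_send_request(struct vmbus_channel *channel)
{
        struct {
                u32 opcode;
                u32 arg;
        } example_request = { .opcode = 1, .arg = 0 };

        /* The 64-bit requestid is echoed back in the completion packet. */
        return vmbus_sendpacket(channel, &example_request,
                                sizeof(example_request), 0x1ULL,
                                VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}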

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                     struct hv_page_buffer pagebuffers[],
                                     u32 pagecount, void *buffer, u32 bufferlen,
                                     u64 requestid)
{
        int ret;
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct scatterlist bufferlist[3];
        u64 aligned_data = 0;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;


        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                          ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                          sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        sg_init_table(bufferlist, 3);
        sg_set_buf(&bufferlist[0], &desc, descsize);
        sg_set_buf(&bufferlist[1], buffer, bufferlen);
        sg_set_buf(&bufferlist[2], &aligned_data,
                packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);

        if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                struct hv_multipage_buffer *multi_pagebuffer,
                                void *buffer, u32 bufferlen, u64 requestid)
{
        int ret;
        struct vmbus_channel_packet_multipage_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct scatterlist bufferlist[3];
        u64 aligned_data = 0;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);


        if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_multipage_buffer is
         * the largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
                          ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
                          sizeof(u64));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));


        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = 1;

        desc.range.len = multi_pagebuffer->len;
        desc.range.offset = multi_pagebuffer->offset;

        memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
               pfncount * sizeof(u64));

        sg_init_table(bufferlist, 3);
        sg_set_buf(&bufferlist[0], &desc, descsize);
        sg_set_buf(&bufferlist[1], buffer, bufferlen);
        sg_set_buf(&bufferlist[2], &aligned_data,
                packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);

        if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                        u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
{
        struct vmpacket_descriptor desc;
        u32 packetlen;
        u32 userlen;
        int ret;

        *buffer_actual_len = 0;
        *requestid = 0;


        ret = hv_ringbuffer_peek(&channel->inbound, &desc,
                             sizeof(struct vmpacket_descriptor));
        if (ret != 0)
                return 0;

        packetlen = desc.len8 << 3;
        userlen = packetlen - (desc.offset8 << 3);

        *buffer_actual_len = userlen;

        if (userlen > bufferlen) {

                pr_err("Buffer too small - got %d needs %d\n",
                           bufferlen, userlen);
                return -ETOOSMALL;
        }

        *requestid = desc.trans_id;

        /* Copy over the packet to the user buffer */
        ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
                             (desc.offset8 << 3));


        return 0;
}
EXPORT_SYMBOL(vmbus_recvpacket);
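
/*
 * Usage sketch (illustrative only): draining the inbound ring from a
 * channel callback.  The 512-byte scratch buffer and the commented-out
 * example_handle_packet() helper are hypothetical.
 */
static void example_drain_channel(struct vmbus_channel *channel)
{
        u8 buf[512];
        u32 actual_len;
        u64 req_id;

        /* vmbus_recvpacket() returns 0 with actual_len == 0 when empty. */
        while (vmbus_recvpacket(channel, buf, sizeof(buf),
                                &actual_len, &req_id) == 0 &&
               actual_len != 0) {
                /* example_handle_packet(buf, actual_len, req_id); */
        }
}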

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                              u32 bufferlen, u32 *buffer_actual_len,
                              u64 *requestid)
{
        struct vmpacket_descriptor desc;
        u32 packetlen;
        u32 userlen;
        int ret;

        *buffer_actual_len = 0;
        *requestid = 0;


        ret = hv_ringbuffer_peek(&channel->inbound, &desc,
                             sizeof(struct vmpacket_descriptor));
        if (ret != 0)
                return 0;


        packetlen = desc.len8 << 3;
        userlen = packetlen - (desc.offset8 << 3);

        *buffer_actual_len = packetlen;

        if (packetlen > bufferlen) {
                pr_err("Buffer too small - needed %d bytes but "
                        "got space for only %d bytes\n",
                        packetlen, bufferlen);
                return -ENOBUFS;
        }

        *requestid = desc.trans_id;

        /* Copy over the entire packet to the user buffer */
        ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);