linux/drivers/hv/hv_balloon.c
<<
>>
Prefs
   1/*
   2 * Copyright (c) 2012, Microsoft Corporation.
   3 *
   4 * Author:
   5 *   K. Y. Srinivasan <kys@microsoft.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published
   9 * by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  14 * NON INFRINGEMENT.  See the GNU General Public License for more
  15 * details.
  16 *
  17 */
  18
  19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  20
  21#include <linux/kernel.h>
  22#include <linux/mman.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/module.h>
  26#include <linux/slab.h>
  27#include <linux/kthread.h>
  28#include <linux/completion.h>
  29#include <linux/memory_hotplug.h>
  30#include <linux/memory.h>
  31#include <linux/notifier.h>
  32#include <linux/mman.h>
  33#include <linux/percpu_counter.h>
  34
  35#include <linux/hyperv.h>
  36
  37/*
  38 * We begin with definitions supporting the Dynamic Memory protocol
  39 * with the host.
  40 *
  41 * Begin protocol definitions.
  42 */
  43
  44
  45
  46/*
  47 * Protocol versions. The low word is the minor version, the high word the major
  48 * version.
  49 *
  50 * History:
  51 * Initial version 1.0
  52 * Changed to 0.1 on 2009/03/25
  53 * Changes to 0.2 on 2009/05/14
  54 * Changes to 0.3 on 2009/12/03
  55 * Changed to 1.0 on 2011/04/05
  56 */
  57
  58#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
  59#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
  60#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
  61
/*
 * Known protocol versions, encoded with DYNMEM_MAKE_VERSION().
 * Version 1 is protocol 0.3 (Win7-era hosts); version 2 is protocol
 * 1.0 (Win8-era hosts). Negotiation starts at _CURRENT and falls back
 * (see version_resp()).
 */
enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};
  71
  72
  73
  74/*
  75 * Message Types
  76 */
  77
/*
 * Message types carried in dm_header.type. The *_MAX entries mark the
 * highest message id defined by the corresponding protocol version.
 */
enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR			= 0,
	DM_VERSION_REQUEST		= 1,
	DM_VERSION_RESPONSE		= 2,
	DM_CAPABILITIES_REPORT		= 3,
	DM_CAPABILITIES_RESPONSE	= 4,
	DM_STATUS_REPORT		= 5,
	DM_BALLOON_REQUEST		= 6,
	DM_BALLOON_RESPONSE		= 7,
	DM_UNBALLOON_REQUEST		= 8,
	DM_UNBALLOON_RESPONSE		= 9,
	DM_MEM_HOT_ADD_REQUEST		= 10,
	DM_MEM_HOT_ADD_RESPONSE		= 11,
	DM_VERSION_03_MAX		= 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE			= 12,
	DM_VERSION_1_MAX		= 12
};
 101
 102
 103/*
 104 * Structures defining the dynamic memory management
 105 * protocol.
 106 */
 107
/*
 * Protocol version as exchanged on the wire; overlays the packed
 * (minor, major) halves with the combined 32-bit value used by the
 * DYNMEM_* macros.
 */
union dm_version {
	struct {
		__u16 minor_version;	/* low word */
		__u16 major_version;	/* high word */
	};
	__u32 version;
} __packed;
 115
 116
/*
 * Capability bits the guest advertises in DM_CAPABILITIES_REPORT.
 */
union dm_caps {
	struct {
		__u64 balloon:1;	/* guest supports ballooning */
		__u64 hot_add:1;	/* guest supports memory hot add */
		__u64 reservedz:62;	/* must be zero */
	} cap_bits;
	__u64 caps;
} __packed;
 125
/*
 * A contiguous range of guest physical pages, packed into one 64-bit
 * value for the wire.
 */
union dm_mem_page_range {
	struct  {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64  page_range;
} __packed;
 141
 142
 143
 144/*
 145 * The header for all dynamic memory messages:
 146 *
 147 * type: Type of the message.
 148 * size: Size of the message in bytes; including the header.
 149 * trans_id: The guest is responsible for manufacturing this ID.
 150 */
 151
/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message (enum dm_message_type).
 * size: Size of the message in bytes; including the header.
 * trans_id: The guest is responsible for manufacturing this ID
 *           (drawn from the global trans_id counter in this driver).
 */
struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;
 157
 158/*
 159 * A generic message format for dynamic memory.
 160 * Specific message formats are defined later in the file.
 161 */
 162
/*
 * A generic message format for dynamic memory: a header followed by a
 * type-specific payload. Used to peek at the header before dispatching
 * to the specific message formats defined later in the file.
 */
struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;
 167
 168
 169/*
 170 * Specific message types supporting the dynamic memory protocol.
 171 */
 172
 173/*
 174 * Version negotiation message. Sent from the guest to the host.
 175 * The guest is free to try different versions until the host
 176 * accepts the version.
 177 *
 178 * dm_version: The protocol version requested.
 179 * is_last_attempt: If TRUE, this is the last version guest will request.
 180 * reservedz: Reserved field, set to zero.
 181 */
 182
/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 */
struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;	/* protocol version requested */
	__u32 is_last_attempt:1;	/* 1 = last version guest will try */
	__u32 reservedz:31;		/* must be zero */
} __packed;
 189
 190/*
 191 * Version response message; Host to Guest and indicates
 192 * if the host has accepted the version sent by the guest.
 193 *
 194 * is_accepted: If TRUE, host has accepted the version and the guest
 195 * should proceed to the next stage of the protocol. FALSE indicates that
 196 * guest should re-try with a different version.
 197 *
 198 * reservedz: Reserved field, set to zero.
 199 */
 200
/*
 * Version response message; host to guest. If is_accepted is TRUE the
 * guest proceeds to capabilities reporting; if FALSE the guest retries
 * with a different (lower) version or gives up.
 */
struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;	/* must be zero */
} __packed;
 206
 207/*
 208 * Message reporting capabilities. This is sent from the guest to the
 209 * host.
 210 */
 211
/*
 * Message reporting capabilities. This is sent from the guest to the
 * host. min_page_cnt/max_page_number mirror what the Windows driver
 * sends; see balloon_probe() for the values used here.
 */
struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;
 218
 219/*
 220 * Response to the capabilities message. This is sent from the host to the
 221 * guest. This message notifies if the host has accepted the guest's
 222 * capabilities. If the host has not accepted, the guest must shutdown
 223 * the service.
 224 *
 225 * is_accepted: Indicates if the host has accepted guest's capabilities.
 226 * reservedz: Must be 0.
 227 */
 228
/*
 * Response to the capabilities message; host to guest. If the host has
 * not accepted our capabilities, the guest must shut down the service
 * (the probe path fails in that case).
 */
struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;	/* host accepted guest's capabilities */
	__u64 reservedz:63;	/* must be zero */
} __packed;
 234
 235/*
 236 * This message is used to report memory pressure from the guest.
 237 * This message is not part of any transaction and there is no
 238 * response to this message.
 239 *
 240 * num_avail: Available memory in pages.
 241 * num_committed: Committed memory in pages.
 242 * page_file_size: The accumulated size of all page files
 243 *                 in the system in pages.
 244 * zero_free: The nunber of zero and free pages.
 245 * page_file_writes: The writes to the page file in pages.
 246 * io_diff: An indicator of file cache efficiency or page file activity,
 247 *          calculated as File Cache Page Fault Count - Page Read Count.
 248 *          This value is in pages.
 249 *
 250 * Some of these metrics are Windows specific and fortunately
 251 * the algorithm on the host side that computes the guest memory
 252 * pressure only uses num_committed value.
 253 */
 254
/*
 * Memory-pressure report, guest to host; not part of any transaction
 * and gets no response. Most metrics are Windows specific; the host's
 * pressure algorithm only uses num_committed, so this driver fills in
 * just that field (see post_status()).
 */
struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;	/* available memory, in pages */
	__u64 num_committed;	/* committed memory, in pages */
	__u64 page_file_size;	/* accumulated page file size, in pages */
	__u64 zero_free;	/* number of zero and free pages */
	__u32 page_file_writes;	/* writes to the page file, in pages */
	__u32 io_diff;		/* file-cache faults minus page reads */
} __packed;
 264
 265
 266/*
 267 * Message to ask the guest to allocate memory - balloon up message.
 268 * This message is sent from the host to the guest. The guest may not be
 269 * able to allocate as much memory as requested.
 270 *
 271 * num_pages: number of pages to allocate.
 272 */
 273
/*
 * Balloon-up request, host to guest: asks the guest to allocate
 * num_pages pages. The guest may not be able to allocate as much
 * memory as requested.
 */
struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;	/* number of pages to allocate */
	__u32 reservedz;	/* must be zero */
} __packed;
 279
 280
 281/*
 282 * Balloon response message; this message is sent from the guest
 283 * to the host in response to the balloon message.
 284 *
 285 * reservedz: Reserved; must be set to zero.
 286 * more_pages: If FALSE, this is the last message of the transaction.
 287 * if TRUE there will atleast one more message from the guest.
 288 *
 289 * range_count: The number of ranges in the range array.
 290 *
 291 * range_array: An array of page ranges returned to the host.
 292 *
 293 */
 294
/*
 * Balloon response, guest to host, answering a balloon-up message.
 * more_pages == 1 means at least one more response follows in this
 * transaction; 0 marks the final message. range_array lists the page
 * ranges surrendered to the host (range_count entries).
 */
struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;	/* must be zero */
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
 302
 303/*
 304 * Un-balloon message; this message is sent from the host
 305 * to the guest to give guest more memory.
 306 *
 307 * more_pages: If FALSE, this is the last message of the transaction.
 308 * if TRUE there will atleast one more message from the guest.
 309 *
 310 * reservedz: Reserved; must be set to zero.
 311 *
 312 * range_count: The number of ranges in the range array.
 313 *
 314 * range_array: An array of page ranges returned to the host.
 315 *
 316 */
 317
/*
 * Un-balloon request, host to guest: gives memory back to the guest.
 * more_pages == 1 means at least one more request follows in this
 * transaction; the guest responds only after the final one.
 * range_array lists the page ranges returned (range_count entries).
 */
struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;	/* must be zero */
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;
 325
 326/*
 327 * Un-balloon response message; this message is sent from the guest
 328 * to the host in response to an unballoon request.
 329 *
 330 */
 331
/*
 * Un-balloon response, guest to host, sent in response to the last
 * unballoon request of a transaction; carries no payload.
 */
struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;
 335
 336
 337/*
 338 * Hot add request message. Message sent from the host to the guest.
 339 *
 340 * mem_range: Memory range to hot add.
 341 *
 342 * On Linux we currently don't support this since we cannot hot add
 343 * arbitrary granularity of memory.
 344 */
 345
/*
 * Hot add request, host to guest: memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory (see hot_add_req()).
 */
struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;
 350
 351/*
 352 * Hot add response message.
 353 * This message is sent by the guest to report the status of a hot add request.
 354 * If page_count is less than the requested page count, then the host should
 355 * assume all further hot add requests will fail, since this indicates that
 356 * the guest has hit an upper physical memory barrier.
 357 *
 358 * Hot adds may also fail due to low resources; in this case, the guest must
 359 * not complete this message until the hot add can succeed, and the host must
 360 * not send a new hot add request until the response is sent.
 361 * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 362 * times it fails the request.
 363 *
 364 *
 365 * page_count: number of pages that were successfully hot added.
 366 *
 367 * result: result of the operation 1: success, 0: failure.
 368 *
 369 */
 370
/*
 * Hot add response, guest to host, reporting the status of a hot add
 * request. If page_count is less than the requested page count, the
 * host assumes further hot adds will fail (physical memory ceiling).
 * This driver always reports 0 pages / failure (see hot_add_req()).
 */
struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;	/* pages successfully hot added */
	__u32 result;		/* 1: success, 0: failure */
} __packed;
 376
 377/*
 378 * Types of information sent from host to the guest.
 379 */
 380
/*
 * Types of information sent from host to the guest in a
 * DM_INFO_MESSAGE (see struct dm_info_header).
 */
enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};
 385
 386
 387/*
 388 * Header for the information message.
 389 */
 390
/*
 * Header of the information blob embedded in a dm_info_msg:
 * identifies the blob's type and the size of the data that follows.
 */
struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;
 395
 396/*
 397 * This message is sent from the host to the guest to pass
 398 * some relevant information (win8 addition).
 399 *
 400 * reserved: no used.
 401 * info_size: size of the information blob.
 402 * info: information blob.
 403 */
 404
 405struct dm_info_msg {
 406        struct dm_header hdr;
 407        __u32 reserved;
 408        __u32 info_size;
 409        __u8  info[];
 410};
 411
 412/*
 413 * End protocol definitions.
 414 */
 415
/*
 * "hot_add" is the writable module parameter; it is sampled into
 * do_hot_add once, at probe time (see balloon_probe()).
 */
static bool hot_add;
static bool do_hot_add;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

/* Monotonic transaction id stamped into every message header we send. */
static atomic_t trans_id = ATOMIC_INIT(0);

/* Size of each vmbus ring buffer (send and receive), in bytes. */
static int dm_ring_size = (5 * PAGE_SIZE);
 425
 426/*
 427 * Driver specific state.
 428 */
 429
/*
 * Driver state machine. The channel callback moves the state to
 * DM_BALLOON_UP/DM_BALLOON_DOWN/DM_HOT_ADD when the corresponding host
 * request arrives; the handlers return it to DM_INITIALIZED when done.
 */
enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};
 438
 439
/* One-page buffer the channel callback receives packets into. */
static __u8 recv_buffer[PAGE_SIZE];
/* One-page buffer, kmalloc'ed at probe, used to build balloon responses. */
static __u8 *send_buffer;
/* 4K pages per 2M region; not referenced elsewhere in this file. */
#define PAGES_IN_2M	512
 443
/*
 * Per-device state for the dynamic memory driver. Only one instance
 * (dm_device, below) exists.
 */
struct hv_dynmem_device {
	struct hv_device *dev;		/* underlying vmbus device */
	enum hv_dm_state state;		/* current protocol state */
	/* Signalled when the host answers version/capability messages. */
	struct completion host_event;
	/* Signalled to hand balloon-up/hot-add requests to the thread. */
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;

	/*
	 * This thread handles both balloon/hot-add
	 * requests from the host as well as notifying
	 * the host with regards to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

/* The single device instance; also reachable via hv_get_drvdata(). */
static struct hv_dynmem_device dm_device;
 472
 473static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
 474{
 475
 476        struct dm_hot_add_response resp;
 477
 478        if (do_hot_add) {
 479
 480                pr_info("Memory hot add not supported\n");
 481
 482                /*
 483                 * Currently we do not support hot add.
 484                 * Just fail the request.
 485                 */
 486        }
 487
 488        memset(&resp, 0, sizeof(struct dm_hot_add_response));
 489        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
 490        resp.hdr.size = sizeof(struct dm_hot_add_response);
 491        resp.hdr.trans_id = atomic_inc_return(&trans_id);
 492
 493        resp.page_count = 0;
 494        resp.result = 0;
 495
 496        dm->state = DM_INITIALIZED;
 497        vmbus_sendpacket(dm->dev->channel, &resp,
 498                        sizeof(struct dm_hot_add_response),
 499                        (unsigned long)NULL,
 500                        VM_PKT_DATA_INBAND, 0);
 501
 502}
 503
 504static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 505{
 506        struct dm_info_header *info_hdr;
 507
 508        info_hdr = (struct dm_info_header *)msg->info;
 509
 510        switch (info_hdr->type) {
 511        case INFO_TYPE_MAX_PAGE_CNT:
 512                pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
 513                pr_info("Data Size is %d\n", info_hdr->data_size);
 514                break;
 515        default:
 516                pr_info("Received Unknown type: %d\n", info_hdr->type);
 517        }
 518}
 519
 520/*
 521 * Post our status as it relates memory pressure to the
 522 * host. Host expects the guests to post this status
 523 * periodically at 1 second intervals.
 524 *
 525 * The metrics specified in this protocol are very Windows
 526 * specific and so we cook up numbers here to convey our memory
 527 * pressure.
 528 */
 529
 530static void post_status(struct hv_dynmem_device *dm)
 531{
 532        struct dm_status status;
 533
 534
 535        memset(&status, 0, sizeof(struct dm_status));
 536        status.hdr.type = DM_STATUS_REPORT;
 537        status.hdr.size = sizeof(struct dm_status);
 538        status.hdr.trans_id = atomic_inc_return(&trans_id);
 539
 540
 541        status.num_committed = vm_memory_committed();
 542
 543        vmbus_sendpacket(dm->dev->channel, &status,
 544                                sizeof(struct dm_status),
 545                                (unsigned long)NULL,
 546                                VM_PKT_DATA_INBAND, 0);
 547
 548}
 549
 550
 551
 552static void free_balloon_pages(struct hv_dynmem_device *dm,
 553                         union dm_mem_page_range *range_array)
 554{
 555        int num_pages = range_array->finfo.page_cnt;
 556        __u64 start_frame = range_array->finfo.start_page;
 557        struct page *pg;
 558        int i;
 559
 560        for (i = 0; i < num_pages; i++) {
 561                pg = pfn_to_page(i + start_frame);
 562                __free_page(pg);
 563                dm->num_pages_ballooned--;
 564        }
 565}
 566
 567
 568
/*
 * Allocate up to num_pages pages (in chunks of alloc_unit pages) and
 * record each chunk in bl_resp->range_array.
 *
 * Returns the number of pages recorded in this response. A partial
 * count is returned when the response buffer (one page) fills up, or
 * when an allocation fails — the latter also sets *alloc_error so the
 * caller can end the transaction.
 */
static int  alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
			 struct dm_balloon_response *bl_resp, int alloc_unit,
			 bool *alloc_error)
{
	int i = 0;
	struct page *pg;

	/* Requests smaller than the allocation unit cannot be satisfied. */
	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		/* Stop before the next range entry would overflow the
		 * page-sized response buffer. */
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				__GFP_NOMEMALLOC | __GFP_NOWARN,
				get_order(alloc_unit << PAGE_SHIFT));

		if (!pg) {
			*alloc_error = true;
			return i * alloc_unit;
		}


		dm->num_pages_ballooned += alloc_unit;

		/* Record this chunk in the response and grow its size. */
		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);

	}

	return num_pages;
}
 610
 611
 612
/*
 * Handle a DM_BALLOON_REQUEST: allocate up to req->num_pages pages and
 * hand them to the host in one or more DM_BALLOON_RESPONSE messages.
 * Runs in the dm_thread_func() context (not in the channel callback).
 */
static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
{
	int num_pages = req->num_pages;
	int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool alloc_error = false;
	bool done = false;
	int i;


	/*
	 * Currently, we only support 4k allocations.
	 */
	alloc_unit = 1;

	while (!done) {
		/* Each response is built in the page-sized send_buffer. */
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;


		/* Only what is still outstanding from the request. */
		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(dm, num_pages,
						bl_resp, alloc_unit,
						 &alloc_error);

		/*
		 * End the transaction when allocation failed (we cannot
		 * fully satisfy the host) or the request has been met;
		 * either way this is the final response.
		 */
		if ((alloc_error) || (num_ballooned == num_pages)) {
			bl_resp->more_pages = 0;
			done = true;
			dm->state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */

		do {
			ret = vmbus_sendpacket(dm_device.dev->channel,
						bl_resp,
						bl_resp->hdr.size,
						(unsigned long)NULL,
						VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);

		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * The send failed for a non-transient reason;
			 * free up the memory we allocated for this
			 * response and give up.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(dm,
						 &bl_resp->range_array[i]);

			done = true;
		}
	}

}
 683
 684static void balloon_down(struct hv_dynmem_device *dm,
 685                        struct dm_unballoon_request *req)
 686{
 687        union dm_mem_page_range *range_array = req->range_array;
 688        int range_count = req->range_count;
 689        struct dm_unballoon_response resp;
 690        int i;
 691
 692        for (i = 0; i < range_count; i++)
 693                free_balloon_pages(dm, &range_array[i]);
 694
 695        if (req->more_pages == 1)
 696                return;
 697
 698        memset(&resp, 0, sizeof(struct dm_unballoon_response));
 699        resp.hdr.type = DM_UNBALLOON_RESPONSE;
 700        resp.hdr.trans_id = atomic_inc_return(&trans_id);
 701        resp.hdr.size = sizeof(struct dm_unballoon_response);
 702
 703        vmbus_sendpacket(dm_device.dev->channel, &resp,
 704                                sizeof(struct dm_unballoon_response),
 705                                (unsigned long)NULL,
 706                                VM_PKT_DATA_INBAND, 0);
 707
 708        dm->state = DM_INITIALIZED;
 709}
 710
 711static void balloon_onchannelcallback(void *context);
 712
/*
 * Main driver thread: posts a memory-pressure report to the host
 * roughly once a second and services balloon-up / hot-add requests
 * handed over by the channel callback via dm->config_event.
 */
static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;
	int t;
	unsigned long  scan_start;

	while (!kthread_should_stop()) {
		t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */

		if (t == 0)
			post_status(dm);

		scan_start = jiffies;
		switch (dm->state) {
		case DM_BALLOON_UP:
			balloon_up(dm, (struct dm_balloon *)recv_buffer);
			break;

		case DM_HOT_ADD:
			hot_add_req(dm, (struct dm_hot_add *)recv_buffer);
			break;
		default:
			break;
		}

		/*
		 * If servicing the request took longer than a second,
		 * post a status report before sleeping again so the
		 * host's once-a-second expectation is still met.
		 */
		if (!time_in_range(jiffies, scan_start, scan_start + HZ))
			post_status(dm);

	}

	return 0;
}
 749
 750
/*
 * Handle a DM_VERSION_RESPONSE from the host. On acceptance, wake the
 * probe path waiting on host_event. On rejection, retry once with the
 * saved fallback version (next_version); if no fallback remains or the
 * retry cannot be sent, mark the device DM_INIT_ERROR and wake the
 * waiter so probe fails.
 */
static void version_resp(struct hv_dynmem_device *dm,
			struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not
	 * shutdown the service since we are not able
	 * to negotiate a suitable version number
	 * with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	/* Consume the fallback: this retry is the last attempt. */
	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}
 798
 799static void cap_resp(struct hv_dynmem_device *dm,
 800                        struct dm_capabilities_resp_msg *cap_resp)
 801{
 802        if (!cap_resp->is_accepted) {
 803                pr_info("Capabilities not accepted by host\n");
 804                dm->state = DM_INIT_ERROR;
 805        }
 806        complete(&dm->host_event);
 807}
 808
/*
 * Channel callback: receive one packet into recv_buffer and dispatch
 * on its message type. Version/capability responses and unballoon
 * requests are handled here; balloon-up and hot-add are deferred to
 * dm_thread_func() by setting dm->state and signalling config_event
 * (the request stays in recv_buffer for the thread to read).
 */
static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				 (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			/* Hand off to the driver thread. */
			dm->state = DM_BALLOON_UP;
			complete(&dm->config_event);
			break;

		case DM_UNBALLOON_REQUEST:
			/* Freeing pages is safe here; handle inline. */
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				 (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			/* Hand off to the driver thread. */
			dm->state = DM_HOT_ADD;
			complete(&dm->config_event);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);

		}
	}

}
 864
/*
 * Probe: open the channel, start the driver thread, then run the
 * two-step handshake with the host — version negotiation followed by
 * capabilities reporting. Each step sends a message and waits (up to
 * 5s) on host_event, which the channel callback completes; the
 * callback records failures in dm_device.state as DM_INIT_ERROR.
 * Errors unwind via the goto chain in reverse order of acquisition.
 */
static int balloon_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret, t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	/* Sample the module parameter once for the life of the device. */
	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */

	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the hand shake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}
	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	/*
	 * While we currently don't support hot-add,
	 * we still advertise this capability since the
	 * host requires that guests participating in the
	 * dynamic memory protocol support hot add.
	 */
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
				sizeof(struct dm_capabilities),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

	/* Unwind in reverse order of acquisition. */
probe_error2:
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}
 998
/*
 * Remove: tear down in reverse order of probe — close the channel,
 * stop the driver thread, free the send buffer. Pages still ballooned
 * out cannot be reclaimed here; warn if any remain.
 */
static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);

	return 0;
}
1012
1013static const struct hv_vmbus_device_id id_table[] = {
1014        /* Dynamic Memory Class ID */
1015        /* 525074DC-8985-46e2-8057-A307DC18A502 */
1016        { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
1017                       0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1018        },
1019        { },
1020};
1021
1022MODULE_DEVICE_TABLE(vmbus, id_table);
1023
1024static  struct hv_driver balloon_drv = {
1025        .name = "hv_balloon",
1026        .id_table = id_table,
1027        .probe =  balloon_probe,
1028        .remove =  balloon_remove,
1029};
1030
1031static int __init init_balloon_drv(void)
1032{
1033
1034        return vmbus_driver_register(&balloon_drv);
1035}
1036
1037static void exit_balloon_drv(void)
1038{
1039
1040        vmbus_driver_unregister(&balloon_drv);
1041}
1042
/* Standard module entry/exit registration and metadata. */
module_init(init_balloon_drv);
module_exit(exit_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");
1049
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.