/* linux/drivers/acpi/acpi_ipmi.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  acpi_ipmi.c - ACPI IPMI opregion
   4 *
   5 *  Copyright (C) 2010, 2013 Intel Corporation
   6 *    Author: Zhao Yakui <yakui.zhao@intel.com>
   7 *            Lv Zheng <lv.zheng@intel.com>
   8 */
   9
  10#include <linux/module.h>
  11#include <linux/acpi.h>
  12#include <linux/ipmi.h>
  13#include <linux/spinlock.h>
  14
  15MODULE_AUTHOR("Zhao Yakui");
  16MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
  17MODULE_LICENSE("GPL");
  18
  19#define ACPI_IPMI_OK                    0
  20#define ACPI_IPMI_TIMEOUT               0x10
  21#define ACPI_IPMI_UNKNOWN               0x07
  22/* the IPMI timeout is 5s */
  23#define IPMI_TIMEOUT                    (5000)
  24#define ACPI_IPMI_MAX_MSG_LENGTH        64
  25
/*
 * Per-SMI context created in ipmi_dev_alloc() when an ACPI-described
 * IPMI system interface appears; reference counted via @kref.
 */
struct acpi_ipmi_device {
	/* the device list attached to driver_data.ipmi_devices */
	struct list_head head;

	/* the IPMI request message list */
	struct list_head tx_msg_list;

	/* protects tx_msg_list and curr_msgid */
	spinlock_t tx_msg_lock;
	/* ACPI handle of the system interface device */
	acpi_handle handle;
	/* underlying SMI device, held with get_device() while we exist */
	struct device *dev;
	/* messaging user created by ipmi_create_user() */
	struct ipmi_user *user_interface;
	int ipmi_ifnum; /* IPMI interface number */
	/* last message id handed out by acpi_format_ipmi_request() */
	long curr_msgid;
	/* set by __ipmi_dev_kill() once unlinked from the device list */
	bool dead;
	struct kref kref;
};
  42
/*
 * Global driver state: the list of known IPMI devices, the SMI
 * watcher callbacks and the receive handler table.
 */
struct ipmi_driver_data {
	struct list_head ipmi_devices;
	struct ipmi_smi_watcher bmc_events;
	const struct ipmi_user_hndl ipmi_hndlrs;
	/* protects ipmi_devices and selected_smi */
	struct mutex ipmi_lock;

	/*
	 * NOTE: IPMI System Interface Selection
	 * There is no system interface specified by the IPMI operation
	 * region access.  We try to select one system interface with ACPI
	 * handle set.  IPMI messages passed from the ACPI codes are sent
	 * to this selected global IPMI system interface.
	 */
	struct acpi_ipmi_device *selected_smi;
};
  58
/*
 * One in-flight IPMI request, created per opregion access; reference
 * counted via @kref (one ref for the waiter, one for tx_msg_list).
 */
struct acpi_ipmi_msg {
	struct list_head head;

	/*
	 * Generally speaking the addr type should be SI_ADDR_TYPE. And
	 * the addr channel should be BMC.
	 * In fact it can also be IPMB type. But we will have to
	 * parse it from the Netfn command buffer. It is so complex
	 * that it is skipped.
	 */
	struct ipmi_addr addr;
	/* matched against ipmi_recv_msg->msgid in ipmi_msg_handler() */
	long tx_msgid;

	/* it is used to track whether the IPMI message is finished */
	struct completion tx_complete;

	struct kernel_ipmi_msg tx_message;
	/* ACPI_IPMI_OK / ACPI_IPMI_TIMEOUT / ACPI_IPMI_UNKNOWN */
	int msg_done;

	/* tx/rx data . And copy it from/to ACPI object buffer */
	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
	/* number of valid response bytes in @data */
	u8 rx_len;

	/* SMI this message is bound to; pinned by ipmi_msg_alloc() */
	struct acpi_ipmi_device *device;
	struct kref kref;
};
  85
  86/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
	u8 status;	/* completion status of the request */
	u8 length;	/* number of valid bytes in @data */
	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
  92
/* forward declarations for the watcher and receive callbacks below */
static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);

/* the single, statically-initialized instance of the driver state */
static struct ipmi_driver_data driver_data = {
	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
	.bmc_events = {
		.owner = THIS_MODULE,
		.new_smi = ipmi_register_bmc,
		.smi_gone = ipmi_bmc_gone,
	},
	.ipmi_hndlrs = {
		.ipmi_recv_hndl = ipmi_msg_handler,
	},
	.ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};
 109
 110static struct acpi_ipmi_device *
 111ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
 112{
 113        struct acpi_ipmi_device *ipmi_device;
 114        int err;
 115        struct ipmi_user *user;
 116
 117        ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
 118        if (!ipmi_device)
 119                return NULL;
 120
 121        kref_init(&ipmi_device->kref);
 122        INIT_LIST_HEAD(&ipmi_device->head);
 123        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
 124        spin_lock_init(&ipmi_device->tx_msg_lock);
 125        ipmi_device->handle = handle;
 126        ipmi_device->dev = get_device(dev);
 127        ipmi_device->ipmi_ifnum = iface;
 128
 129        err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
 130                               ipmi_device, &user);
 131        if (err) {
 132                put_device(dev);
 133                kfree(ipmi_device);
 134                return NULL;
 135        }
 136        ipmi_device->user_interface = user;
 137
 138        return ipmi_device;
 139}
 140
/*
 * Final teardown: destroy the IPMI user, drop the device reference
 * taken in ipmi_dev_alloc() and free the context.  Called directly
 * only when the device was never published, otherwise via kref.
 */
static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
	ipmi_destroy_user(ipmi_device->user_interface);
	put_device(ipmi_device->dev);
	kfree(ipmi_device);
}
 147
/* kref release callback: runs when the last reference is dropped. */
static void ipmi_dev_release_kref(struct kref *kref)
{
	struct acpi_ipmi_device *ipmi =
		container_of(kref, struct acpi_ipmi_device, kref);

	ipmi_dev_release(ipmi);
}
 155
/*
 * Unlink @ipmi_device from driver_data.ipmi_devices and clear it as
 * the selected SMI if it was selected.  Caller must hold
 * driver_data.ipmi_lock and later drop the list's reference with
 * acpi_ipmi_dev_put().
 */
static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
	list_del(&ipmi_device->head);
	if (driver_data.selected_smi == ipmi_device)
		driver_data.selected_smi = NULL;

	/*
	 * Always setting dead flag after deleting from the list or
	 * list_for_each_entry() codes must get changed.
	 */
	ipmi_device->dead = true;
}
 168
 169static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
 170{
 171        struct acpi_ipmi_device *ipmi_device = NULL;
 172
 173        mutex_lock(&driver_data.ipmi_lock);
 174        if (driver_data.selected_smi) {
 175                ipmi_device = driver_data.selected_smi;
 176                kref_get(&ipmi_device->kref);
 177        }
 178        mutex_unlock(&driver_data.ipmi_lock);
 179
 180        return ipmi_device;
 181}
 182
/* Drop a reference taken by acpi_ipmi_dev_get() or held by the list. */
static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
{
	kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
}
 187
 188static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
 189{
 190        struct acpi_ipmi_device *ipmi;
 191        struct acpi_ipmi_msg *ipmi_msg;
 192
 193        ipmi = acpi_ipmi_dev_get();
 194        if (!ipmi)
 195                return NULL;
 196
 197        ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
 198        if (!ipmi_msg) {
 199                acpi_ipmi_dev_put(ipmi);
 200                return NULL;
 201        }
 202
 203        kref_init(&ipmi_msg->kref);
 204        init_completion(&ipmi_msg->tx_complete);
 205        INIT_LIST_HEAD(&ipmi_msg->head);
 206        ipmi_msg->device = ipmi;
 207        ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
 208
 209        return ipmi_msg;
 210}
 211
/* Free a tx message and drop the SMI reference it pinned at alloc. */
static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
{
	acpi_ipmi_dev_put(tx_msg->device);
	kfree(tx_msg);
}
 217
/* kref release callback for acpi_ipmi_msg_put(). */
static void ipmi_msg_release_kref(struct kref *kref)
{
	struct acpi_ipmi_msg *tx_msg =
		container_of(kref, struct acpi_ipmi_msg, kref);

	ipmi_msg_release(tx_msg);
}
 225
/* Take an extra reference on @tx_msg; returns it for convenience. */
static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
{
	kref_get(&tx_msg->kref);

	return tx_msg;
}
 232
/* Drop one reference on @tx_msg, freeing it when the count hits zero. */
static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
{
	kref_put(&tx_msg->kref, ipmi_msg_release_kref);
}
 237
/* netfn lives in bits 8-15 of the opregion address, cmd in bits 0-7 */
#define IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
/*
 * Fill @tx_msg from an IPMI opregion write: decode netfn/cmd from
 * @address, copy the request payload out of @value (which points at an
 * acpi_ipmi_buffer), address the BMC via the system interface, and
 * assign a fresh message id.  Returns 0 on success, -EINVAL when the
 * request payload exceeds ACPI_IPMI_MAX_MSG_LENGTH.
 */
static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
				    acpi_physical_address address,
				    acpi_integer *value)
{
	struct kernel_ipmi_msg *msg;
	struct acpi_ipmi_buffer *buffer;
	struct acpi_ipmi_device *device;
	unsigned long flags;

	msg = &tx_msg->tx_message;

	/*
	 * IPMI network function and command are encoded in the address
	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
	 */
	msg->netfn = IPMI_OP_RGN_NETFN(address);
	msg->cmd = IPMI_OP_RGN_CMD(address);
	msg->data = tx_msg->data;

	/*
	 * value is the parameter passed by the IPMI opregion space handler.
	 * It points to the IPMI request message buffer
	 */
	buffer = (struct acpi_ipmi_buffer *)value;

	/* copy the tx message data, rejecting oversized requests */
	if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
		dev_WARN_ONCE(tx_msg->device->dev, true,
			      "Unexpected request (msg len %d).\n",
			      buffer->length);
		return -EINVAL;
	}
	msg->data_len = buffer->length;
	memcpy(tx_msg->data, buffer->data, msg->data_len);

	/*
	 * now the default type is SYSTEM_INTERFACE and channel type is BMC.
	 * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
	 * the addr type should be changed to IPMB. Then we will have to parse
	 * the IPMI request message buffer to get the IPMB address.
	 * If so, please fix me.
	 */
	tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	tx_msg->addr.channel = IPMI_BMC_CHANNEL;
	tx_msg->addr.data[0] = 0;

	/* Get the msgid; tx_msg_lock keeps ids unique per device */
	device = tx_msg->device;

	spin_lock_irqsave(&device->tx_msg_lock, flags);
	device->curr_msgid++;
	tx_msg->tx_msgid = device->curr_msgid;
	spin_unlock_irqrestore(&device->tx_msg_lock, flags);

	return 0;
}
 296
 297static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
 298                                      acpi_integer *value)
 299{
 300        struct acpi_ipmi_buffer *buffer;
 301
 302        /*
 303         * value is also used as output parameter. It represents the response
 304         * IPMI message returned by IPMI command.
 305         */
 306        buffer = (struct acpi_ipmi_buffer *)value;
 307
 308        /*
 309         * If the flag of msg_done is not set, it means that the IPMI command is
 310         * not executed correctly.
 311         */
 312        buffer->status = msg->msg_done;
 313        if (msg->msg_done != ACPI_IPMI_OK)
 314                return;
 315
 316        /*
 317         * If the IPMI response message is obtained correctly, the status code
 318         * will be ACPI_IPMI_OK
 319         */
 320        buffer->length = msg->rx_len;
 321        memcpy(buffer->data, msg->data, msg->rx_len);
 322}
 323
/*
 * Complete and drop every message still queued on @ipmi->tx_msg_list.
 * Waiters are woken with msg_done unchanged (i.e. not ACPI_IPMI_OK).
 */
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
	struct acpi_ipmi_msg *tx_msg;
	unsigned long flags;

	/*
	 * NOTE: On-going ipmi_recv_msg
	 * ipmi_msg_handler() may still be invoked by ipmi_si after
	 * flushing.  But it is safe to do a fast flushing on module_exit()
	 * without waiting for all ipmi_recv_msg(s) to complete from
	 * ipmi_msg_handler() as it is ensured by ipmi_si that all
	 * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
	 */
	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	while (!list_empty(&ipmi->tx_msg_list)) {
		tx_msg = list_first_entry(&ipmi->tx_msg_list,
					  struct acpi_ipmi_msg,
					  head);
		list_del(&tx_msg->head);
		/* drop the lock before waking/putting the message */
		spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

		/* wake up the sleep thread on the Tx msg */
		complete(&tx_msg->tx_complete);
		acpi_ipmi_msg_put(tx_msg);
		spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	}
	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}
 352
 353static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
 354                               struct acpi_ipmi_msg *msg)
 355{
 356        struct acpi_ipmi_msg *tx_msg, *temp;
 357        bool msg_found = false;
 358        unsigned long flags;
 359
 360        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
 361        list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
 362                if (msg == tx_msg) {
 363                        msg_found = true;
 364                        list_del(&tx_msg->head);
 365                        break;
 366                }
 367        }
 368        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 369
 370        if (msg_found)
 371                acpi_ipmi_msg_put(tx_msg);
 372}
 373
/*
 * IPMI receive handler (driver_data.ipmi_hndlrs.ipmi_recv_hndl).
 * Matches the response against a pending tx_msg by message id, copies
 * the payload (or records a timeout / oversize condition in msg_done),
 * completes the waiter and drops the list's reference on the tx_msg.
 * The incoming ipmi_recv_msg is always freed before returning.
 */
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	struct acpi_ipmi_device *ipmi_device = user_msg_data;
	bool msg_found = false;
	struct acpi_ipmi_msg *tx_msg, *temp;
	struct device *dev = ipmi_device->dev;
	unsigned long flags;

	/* responses must arrive via the user we created for this device */
	if (msg->user != ipmi_device->user_interface) {
		dev_warn(dev,
			 "Unexpected response is returned. returned user %p, expected user %p\n",
			 msg->user, ipmi_device->user_interface);
		goto out_msg;
	}

	/* unlink the matching request so no one else can complete it */
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
		if (msg->msgid == tx_msg->tx_msgid) {
			msg_found = true;
			list_del(&tx_msg->head);
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

	if (!msg_found) {
		dev_warn(dev,
			 "Unexpected response (msg id %ld) is returned.\n",
			 msg->msgid);
		goto out_msg;
	}

	/* copy the response data to Rx_data buffer */
	if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
		dev_WARN_ONCE(dev, true,
			      "Unexpected response (msg len %d).\n",
			      msg->msg.data_len);
		goto out_comp;
	}

	/*
	 * response msg is an error msg
	 * NOTE(review): the assignment below makes the recv_type test in
	 * the following condition a tautology, so any single-byte
	 * response takes this branch; confirm this overwrite of
	 * msg->recv_type is intentional.
	 */
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
	    msg->msg.data_len == 1) {
		if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
			dev_dbg_once(dev, "Unexpected response (timeout).\n");
			tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
		}
		goto out_comp;
	}

	tx_msg->rx_len = msg->msg.data_len;
	memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
	tx_msg->msg_done = ACPI_IPMI_OK;

out_comp:
	complete(&tx_msg->tx_complete);
	acpi_ipmi_msg_put(tx_msg);
out_msg:
	ipmi_free_recv_msg(msg);
}
 435
/*
 * SMI watcher "new_smi" callback: register an ACPI-described IPMI
 * system interface.  Only interfaces discovered via ACPI (SI_ACPI
 * with a valid handle) are accepted, duplicates (same ACPI handle)
 * are dropped, and the first registered device becomes the selected
 * SMI.  The device reference from ipmi_get_smi_info() is released on
 * every path.
 */
static void ipmi_register_bmc(int iface, struct device *dev)
{
	struct acpi_ipmi_device *ipmi_device, *temp;
	int err;
	struct ipmi_smi_info smi_data;
	acpi_handle handle;

	err = ipmi_get_smi_info(iface, &smi_data);
	if (err)
		return;

	/* only ACPI-enumerated interfaces with a handle are usable */
	if (smi_data.addr_src != SI_ACPI)
		goto err_ref;
	handle = smi_data.addr_info.acpi_info.acpi_handle;
	if (!handle)
		goto err_ref;

	ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
	if (!ipmi_device) {
		dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
		goto err_ref;
	}

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
		/*
		 * if the corresponding ACPI handle is already added
		 * to the device list, don't add it again.
		 */
		if (temp->handle == handle)
			goto err_lock;
	}
	if (!driver_data.selected_smi)
		driver_data.selected_smi = ipmi_device;
	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
	mutex_unlock(&driver_data.ipmi_lock);

	put_device(smi_data.dev);
	return;

err_lock:
	mutex_unlock(&driver_data.ipmi_lock);
	/* never published: tear down directly, bypassing the kref */
	ipmi_dev_release(ipmi_device);
err_ref:
	put_device(smi_data.dev);
}
 482
 483static void ipmi_bmc_gone(int iface)
 484{
 485        struct acpi_ipmi_device *ipmi_device, *temp;
 486        bool dev_found = false;
 487
 488        mutex_lock(&driver_data.ipmi_lock);
 489        list_for_each_entry_safe(ipmi_device, temp,
 490                                 &driver_data.ipmi_devices, head) {
 491                if (ipmi_device->ipmi_ifnum != iface) {
 492                        dev_found = true;
 493                        __ipmi_dev_kill(ipmi_device);
 494                        break;
 495                }
 496        }
 497        if (!driver_data.selected_smi)
 498                driver_data.selected_smi = list_first_entry_or_null(
 499                                        &driver_data.ipmi_devices,
 500                                        struct acpi_ipmi_device, head);
 501        mutex_unlock(&driver_data.ipmi_lock);
 502
 503        if (dev_found) {
 504                ipmi_flush_tx_msg(ipmi_device);
 505                acpi_ipmi_dev_put(ipmi_device);
 506        }
 507}
 508
/*
 * This is the IPMI opregion space handler.
 * @function: indicates the read/write. In fact as the IPMI message is driven
 * by command, only write is meaningful.
 * @address: This contains the netfn/command of IPMI request message.
 * @bits   : not used.
 * @value  : it is an in/out parameter. It points to the IPMI message buffer.
 *           Before the IPMI message is sent, it represents the actual request
 *           IPMI message. After the IPMI message is finished, it represents
 *           the response IPMI message returned by IPMI command.
 * @handler_context: IPMI device context.
 */
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
			u32 bits, acpi_integer *value,
			void *handler_context, void *region_context)
{
	struct acpi_ipmi_msg *tx_msg;
	struct acpi_ipmi_device *ipmi_device;
	int err;
	acpi_status status;
	unsigned long flags;

	/*
	 * IPMI opregion message.
	 * IPMI message is firstly written to the BMC and system software
	 * can get the response. So a read access of the IPMI opregion
	 * is meaningless and rejected.
	 */
	if ((function & ACPI_IO_MASK) == ACPI_READ)
		return AE_TYPE;

	/* allocation pins the selected SMI and gives us one msg ref */
	tx_msg = ipmi_msg_alloc();
	if (!tx_msg)
		return AE_NOT_EXIST;
	ipmi_device = tx_msg->device;

	if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
		ipmi_msg_release(tx_msg);
		return AE_TYPE;
	}

	/* second reference is owned by tx_msg_list once added below */
	acpi_ipmi_msg_get(tx_msg);
	mutex_lock(&driver_data.ipmi_lock);
	/* Do not add a tx_msg that can not be flushed. */
	if (ipmi_device->dead) {
		mutex_unlock(&driver_data.ipmi_lock);
		/* both references are still local; free directly */
		ipmi_msg_release(tx_msg);
		return AE_NOT_EXIST;
	}
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
	mutex_unlock(&driver_data.ipmi_lock);

	err = ipmi_request_settime(ipmi_device->user_interface,
				   &tx_msg->addr,
				   tx_msg->tx_msgid,
				   &tx_msg->tx_message,
				   NULL, 0, 0, IPMI_TIMEOUT);
	if (err) {
		status = AE_ERROR;
		goto out_msg;
	}
	/* completed by ipmi_msg_handler() or ipmi_flush_tx_msg() */
	wait_for_completion(&tx_msg->tx_complete);

	acpi_format_ipmi_response(tx_msg, value);
	status = AE_OK;

out_msg:
	/* drops the list's reference if the msg is still queued */
	ipmi_cancel_tx_msg(ipmi_device, tx_msg);
	acpi_ipmi_msg_put(tx_msg);
	return status;
}
 583
 584static int __init acpi_ipmi_init(void)
 585{
 586        int result;
 587        acpi_status status;
 588
 589        if (acpi_disabled)
 590                return 0;
 591
 592        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
 593                                                    ACPI_ADR_SPACE_IPMI,
 594                                                    &acpi_ipmi_space_handler,
 595                                                    NULL, NULL);
 596        if (ACPI_FAILURE(status)) {
 597                pr_warn("Can't register IPMI opregion space handle\n");
 598                return -EINVAL;
 599        }
 600        result = ipmi_smi_watcher_register(&driver_data.bmc_events);
 601        if (result)
 602                pr_err("Can't register IPMI system interface watcher\n");
 603
 604        return result;
 605}
 606
/*
 * Module exit: unregister the SMI watcher, kill and flush every
 * remaining device, then remove the IPMI opregion handler.
 */
static void __exit acpi_ipmi_exit(void)
{
	struct acpi_ipmi_device *ipmi_device;

	if (acpi_disabled)
		return;

	ipmi_smi_watcher_unregister(&driver_data.bmc_events);

	/*
	 * When one smi_watcher is unregistered, it is only deleted
	 * from the smi_watcher list. But the smi_gone callback function
	 * is not called. So explicitly uninstall the ACPI IPMI opregion
	 * handler and free it.
	 */
	mutex_lock(&driver_data.ipmi_lock);
	while (!list_empty(&driver_data.ipmi_devices)) {
		ipmi_device = list_first_entry(&driver_data.ipmi_devices,
					       struct acpi_ipmi_device,
					       head);
		__ipmi_dev_kill(ipmi_device);
		/*
		 * Drop the mutex around the flush/put: the release path
		 * ends in ipmi_dev_release(), which calls
		 * ipmi_destroy_user().
		 */
		mutex_unlock(&driver_data.ipmi_lock);

		ipmi_flush_tx_msg(ipmi_device);
		acpi_ipmi_dev_put(ipmi_device);

		mutex_lock(&driver_data.ipmi_lock);
	}
	mutex_unlock(&driver_data.ipmi_lock);
	acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
					  ACPI_ADR_SPACE_IPMI,
					  &acpi_ipmi_space_handler);
}
 640
 641module_init(acpi_ipmi_init);
 642module_exit(acpi_ipmi_exit);
 643