/* linux/net/bluetooth/hci_core.c */
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4   Copyright (C) 2011 ProFUSION Embedded Systems
   5
   6   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   7
   8   This program is free software; you can redistribute it and/or modify
   9   it under the terms of the GNU General Public License version 2 as
  10   published by the Free Software Foundation;
  11
  12   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  13   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  15   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  16   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  17   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  18   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  19   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  20
  21   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  22   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  23   SOFTWARE IS DISCLAIMED.
  24*/
  25
  26/* Bluetooth HCI core. */
  27
  28#include <linux/export.h>
  29#include <linux/idr.h>
  30
  31#include <linux/rfkill.h>
  32
  33#include <net/bluetooth/bluetooth.h>
  34#include <net/bluetooth/hci_core.h>
  35
/* Deferred work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
  51/* ---- HCI notifications ---- */
  52
/* Forward a device state event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the
 * HCI socket layer so listeners can be notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
  57
  58/* ---- HCI requests ---- */
  59
  60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
  61{
  62        BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
  63
  64        /* If this is the init phase check if the completed command matches
  65         * the last init command, and if not just return.
  66         */
  67        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
  68                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
  69                u16 opcode = __le16_to_cpu(sent->opcode);
  70                struct sk_buff *skb;
  71
  72                /* Some CSR based controllers generate a spontaneous
  73                 * reset complete event during init and any pending
  74                 * command will never be completed. In such a case we
  75                 * need to resend whatever was the last sent
  76                 * command.
  77                 */
  78
  79                if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
  80                        return;
  81
  82                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
  83                if (skb) {
  84                        skb_queue_head(&hdev->cmd_q, skb);
  85                        queue_work(hdev->workqueue, &hdev->cmd_work);
  86                }
  87
  88                return;
  89        }
  90
  91        if (hdev->req_status == HCI_REQ_PEND) {
  92                hdev->req_result = result;
  93                hdev->req_status = HCI_REQ_DONE;
  94                wake_up_interruptible(&hdev->req_wait_q);
  95        }
  96}
  97
  98static void hci_req_cancel(struct hci_dev *hdev, int err)
  99{
 100        BT_DBG("%s err 0x%2.2x", hdev->name, err);
 101
 102        if (hdev->req_status == HCI_REQ_PEND) {
 103                hdev->req_result = err;
 104                hdev->req_status = HCI_REQ_CANCELED;
 105                wake_up_interruptible(&hdev->req_wait_q);
 106        }
 107}
 108
/* Execute request and wait for completion. Caller must hold the
 * request lock (hci_req_lock). Returns 0 or a negative errno.
 */
static int __hci_request(struct hci_dev *hdev,
                         void (*req)(struct hci_dev *hdev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        /* Enqueue on the wait queue and set the task state *before*
         * issuing the request, so a completion that arrives right away
         * cannot be lost between req() and schedule_timeout().
         */
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        /* Woken by a signal rather than by hci_req_complete/cancel. */
        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                /* Controller status code mapped to a negative errno. */
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                /* req_result already holds a positive errno value. */
                err = -hdev->req_result;
                break;

        default:
                /* Still HCI_REQ_PEND: the timeout expired. */
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
 152
 153static int hci_request(struct hci_dev *hdev,
 154                       void (*req)(struct hci_dev *hdev, unsigned long opt),
 155                       unsigned long opt, __u32 timeout)
 156{
 157        int ret;
 158
 159        if (!test_bit(HCI_UP, &hdev->flags))
 160                return -ENETDOWN;
 161
 162        /* Serialize all requests */
 163        hci_req_lock(hdev);
 164        ret = __hci_request(hdev, req, opt, timeout);
 165        hci_req_unlock(hdev);
 166
 167        return ret;
 168}
 169
/* Request callback: issue an HCI_Reset to the controller and flag the
 * reset as in progress via HCI_RESET.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
 178
/* Queue the init command sequence for a BR/EDR controller. Commands
 * are fire-and-forget here; their results arrive via HCI events.
 */
static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
        param = __constant_cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Drop all stored link keys from the controller. */
        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
 224
/* Queue the init command sequence for an AMP controller, which uses
 * block-based rather than packet-based flow control.
 */
static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
 238
/* Request callback that builds the full controller init sequence:
 * flush any driver-provided commands, optionally reset, then run the
 * transport-specific init (BR/EDR or AMP).
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands: the driver may have pre-queued vendor
         * commands on driver_init; move them onto the command queue.
         */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Reset the controller, unless the driver resets on close and
         * therefore comes up already clean.
         */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(hdev, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}
 275
/* Request callback for LE-specific initialization. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
 283
/* Request callback: write the scan enable setting. @opt is truncated
 * to the one-byte scan mode expected by the controller.
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
 293
/* Request callback: write the authentication enable setting. @opt is
 * truncated to the one-byte value expected by the controller.
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
 303
/* Request callback: write the encryption mode. @opt is truncated to
 * the one-byte value expected by the controller.
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
 313
/* Request callback: write the default link policy (two-byte,
 * little-endian on the wire).
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
 323
 324/* Get HCI device by index.
 325 * Device is held on return. */
 326struct hci_dev *hci_dev_get(int index)
 327{
 328        struct hci_dev *hdev = NULL, *d;
 329
 330        BT_DBG("%d", index);
 331
 332        if (index < 0)
 333                return NULL;
 334
 335        read_lock(&hci_dev_list_lock);
 336        list_for_each_entry(d, &hci_dev_list, list) {
 337                if (d->id == index) {
 338                        hdev = hci_dev_hold(d);
 339                        break;
 340                }
 341        }
 342        read_unlock(&hci_dev_list_lock);
 343        return hdev;
 344}
 345
 346/* ---- Inquiry support ---- */
 347
 348bool hci_discovery_active(struct hci_dev *hdev)
 349{
 350        struct discovery_state *discov = &hdev->discovery;
 351
 352        switch (discov->state) {
 353        case DISCOVERY_FINDING:
 354        case DISCOVERY_RESOLVING:
 355                return true;
 356
 357        default:
 358                return false;
 359        }
 360}
 361
/* Transition the discovery state machine, notifying the mgmt layer
 * when discovery effectively starts or stops. No-op if the state is
 * unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                /* Don't signal "stopped" if discovery never made it
                 * past the STARTING phase.
                 */
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}
 387
/* Free every cached inquiry entry. Entries are owned by the ->all
 * list; the ->unknown and ->resolve lists only reference the same
 * entries, so after freeing it is sufficient to re-initialize those
 * two list heads.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}
 401
 402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
 403                                               bdaddr_t *bdaddr)
 404{
 405        struct discovery_state *cache = &hdev->discovery;
 406        struct inquiry_entry *e;
 407
 408        BT_DBG("cache %p, %s", cache, batostr(bdaddr));
 409
 410        list_for_each_entry(e, &cache->all, all) {
 411                if (!bacmp(&e->data.bdaddr, bdaddr))
 412                        return e;
 413        }
 414
 415        return NULL;
 416}
 417
 418struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
 419                                                       bdaddr_t *bdaddr)
 420{
 421        struct discovery_state *cache = &hdev->discovery;
 422        struct inquiry_entry *e;
 423
 424        BT_DBG("cache %p, %s", cache, batostr(bdaddr));
 425
 426        list_for_each_entry(e, &cache->unknown, list) {
 427                if (!bacmp(&e->data.bdaddr, bdaddr))
 428                        return e;
 429        }
 430
 431        return NULL;
 432}
 433
 434struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
 435                                                       bdaddr_t *bdaddr,
 436                                                       int state)
 437{
 438        struct discovery_state *cache = &hdev->discovery;
 439        struct inquiry_entry *e;
 440
 441        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
 442
 443        list_for_each_entry(e, &cache->resolve, list) {
 444                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
 445                        return e;
 446                if (!bacmp(&e->data.bdaddr, bdaddr))
 447                        return e;
 448        }
 449
 450        return NULL;
 451}
 452
/* Re-insert @ie into the resolve list at its sorted position: entries
 * whose name request is already pending are skipped over, and the rest
 * are ordered by |RSSI| ascending (i.e. strongest signal first, for
 * typical negative RSSI values).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        /* Insert after the last entry that should precede ie. */
        list_add(&ie->list, pos);
}
 471
/* Insert or refresh an inquiry result in the cache.
 *
 * @name_known: caller already knows the remote name.
 * @ssp: out-parameter; set to true when the device reports SSP support
 *       (either in this result or in the cached entry).
 *
 * Returns true when the entry's name is known (or being resolved),
 * false when a name request is still needed or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                /* A previously cached SSP indication sticks. */
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                /* RSSI changed while awaiting name resolution: re-sort
                 * the resolve list so stronger devices go first.
                 */
                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        /* Name just became known: drop the entry from whichever
         * auxiliary list (unknown/resolve) it was on.
         */
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
 527
 528static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
 529{
 530        struct discovery_state *cache = &hdev->discovery;
 531        struct inquiry_info *info = (struct inquiry_info *) buf;
 532        struct inquiry_entry *e;
 533        int copied = 0;
 534
 535        list_for_each_entry(e, &cache->all, all) {
 536                struct inquiry_data *data = &e->data;
 537
 538                if (copied >= num)
 539                        break;
 540
 541                bacpy(&info->bdaddr, &data->bdaddr);
 542                info->pscan_rep_mode    = data->pscan_rep_mode;
 543                info->pscan_period_mode = data->pscan_period_mode;
 544                info->pscan_mode        = data->pscan_mode;
 545                memcpy(info->dev_class, data->dev_class, 3);
 546                info->clock_offset      = data->clock_offset;
 547
 548                info++;
 549                copied++;
 550        }
 551
 552        BT_DBG("cache %p, copied %d", cache, copied);
 553        return copied;
 554}
 555
 556static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
 557{
 558        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
 559        struct hci_cp_inquiry cp;
 560
 561        BT_DBG("%s", hdev->name);
 562
 563        if (test_bit(HCI_INQUIRY, &hdev->flags))
 564                return;
 565
 566        /* Start Inquiry */
 567        memcpy(&cp.lap, &ir->lap, 3);
 568        cp.length  = ir->length;
 569        cp.num_rsp = ir->num_rsp;
 570        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 571}
 572
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * the cached results back to user space. Returns 0 or a negative
 * errno.
 */
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Run a new inquiry only when the cache is stale/empty or the
         * caller explicitly asked for a flush.
         */
        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        /* Wait budget scaled from the requested inquiry length;
         * 2000 ms per length unit here (NOTE(review): inquiry length
         * units are nominally 1.28 s — this over-allocates time).
         */
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        /* Write back the request header (with the actual num_rsp)
         * followed by the result array.
         */
        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
 639
 640/* ---- HCI ioctl helpers ---- */
 641
/* Bring an HCI device up: open the transport, run the init request
 * sequence (unless the device is raw) and, on success, mark it HCI_UP
 * and notify listeners. On init failure all work is flushed and the
 * transport is closed again. Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Device is going away; refuse to open. */
        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        /* Open the underlying transport. */
        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                            HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                /* Hold a reference for as long as the device is up;
                 * dropped in hci_dev_do_close().
                 */
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
 733
/* Tear an HCI device down: cancel pending work and requests, flush
 * queues, optionally reset the controller, close the transport and
 * drop the reference taken in hci_dev_open(). The ordering of the
 * steps below matters — do not reorder. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        /* Already down: just stop the command timer and bail out. */
        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd  work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        /* Drop the reference taken when the device came up. */
        hci_dev_put(hdev);
        return 0;
}
 823
 824int hci_dev_close(__u16 dev)
 825{
 826        struct hci_dev *hdev;
 827        int err;
 828
 829        hdev = hci_dev_get(dev);
 830        if (!hdev)
 831                return -ENODEV;
 832
 833        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
 834                cancel_delayed_work(&hdev->power_off);
 835
 836        err = hci_dev_do_close(hdev);
 837
 838        hci_dev_put(hdev);
 839        return err;
 840}
 841
/* HCIDEVRESET ioctl helper: drop all queued traffic, flush caches and
 * connections, and (for non-raw devices) send an HCI_Reset. Succeeds
 * silently when the device is not up.
 */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset flow control: one command slot, no outstanding data. */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
 879
 880int hci_dev_reset_stat(__u16 dev)
 881{
 882        struct hci_dev *hdev;
 883        int ret = 0;
 884
 885        hdev = hci_dev_get(dev);
 886        if (!hdev)
 887                return -ENODEV;
 888
 889        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 890
 891        hci_dev_put(hdev);
 892
 893        return ret;
 894}
 895
/* Dispatch the HCISET* ioctls: each either issues a synchronous HCI
 * request or updates a host-side setting directly. Returns 0 or a
 * negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                          HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                /* Host-side setting; no controller round-trip. */
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                /* dev_opt packs two 16-bit values: the first half is
                 * the packet count, the second half the MTU.
                 */
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
 970
/* HCIGETDEVLIST ioctl handler: copy up to the requested number of
 * device id/flags pairs to user space. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        /* Cap the request so the kernel allocation stays bounded. */
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                /* Userspace enumerating devices counts as activity:
                 * abort a pending automatic power-off.
                 */
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                /* Legacy (non-mgmt) userspace expects pairable on. */
                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        /* Copy back only the entries actually filled in. */
        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
1017
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot
 * for the device id passed in by userspace and copy it back.
 *
 * Returns 0 on success, -EFAULT on copy failure, -ENODEV for an
 * unknown device id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference on success; released before returning */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is interacting with the device, so cancel any pending
	 * automatic power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects devices to be pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high nibble */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1059
1060/* ---- Interface to HCI drivers ---- */
1061
1062static int hci_rfkill_set_block(void *data, bool blocked)
1063{
1064        struct hci_dev *hdev = data;
1065
1066        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1067
1068        if (!blocked)
1069                return 0;
1070
1071        hci_dev_do_close(hdev);
1072
1073        return 0;
1074}
1075
/* rfkill operations for Bluetooth controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1079
/* Deferred power-on work, queued from hci_register_dev(). Brings the
 * device up and, if nobody claims it, arms the automatic power-off.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power the device back off if no user shows interest in time */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	/* First successful power-on completes setup; announce to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1095
/* Delayed power-off work (armed by hci_power_on / mgmt): close the device */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1105
/* Delayed work that ends a time-limited discoverable period: re-enable
 * page scan only (dropping inquiry scan) and clear the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1123
1124int hci_uuids_clear(struct hci_dev *hdev)
1125{
1126        struct list_head *p, *n;
1127
1128        list_for_each_safe(p, n, &hdev->uuids) {
1129                struct bt_uuid *uuid;
1130
1131                uuid = list_entry(p, struct bt_uuid, list);
1132
1133                list_del(p);
1134                kfree(uuid);
1135        }
1136
1137        return 0;
1138}
1139
1140int hci_link_keys_clear(struct hci_dev *hdev)
1141{
1142        struct list_head *p, *n;
1143
1144        list_for_each_safe(p, n, &hdev->link_keys) {
1145                struct link_key *key;
1146
1147                key = list_entry(p, struct link_key, list);
1148
1149                list_del(p);
1150                kfree(key);
1151        }
1152
1153        return 0;
1154}
1155
/* Remove and free every stored SMP long term key on hdev. Always returns 0. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1167
1168struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1169{
1170        struct link_key *k;
1171
1172        list_for_each_entry(k, &hdev->link_keys, list)
1173                if (bacmp(bdaddr, &k->bdaddr) == 0)
1174                        return k;
1175
1176        return NULL;
1177}
1178
/* Decide whether a newly generated link key should be stored
 * persistently, based on key type and the bonding requirements both
 * sides advertised. Returns true to keep the key across reboots.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key (combination, local unit or remote unit key) */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case (no connection context available) */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1214
1215struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1216{
1217        struct smp_ltk *k;
1218
1219        list_for_each_entry(k, &hdev->long_term_keys, list) {
1220                if (k->ediv != ediv ||
1221                    memcmp(rand, k->rand, sizeof(k->rand)))
1222                        continue;
1223
1224                return k;
1225        }
1226
1227        return NULL;
1228}
1229
1230struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1231                                     u8 addr_type)
1232{
1233        struct smp_ltk *k;
1234
1235        list_for_each_entry(k, &hdev->long_term_keys, list)
1236                if (addr_type == k->bdaddr_type &&
1237                    bacmp(bdaddr, &k->bdaddr) == 0)
1238                        return k;
1239
1240        return NULL;
1241}
1242
/* Store (or update) the BR/EDR link key for bdaddr. When new_key is
 * set, also notify mgmt and record whether the key should be flushed
 * on disconnect. Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are discarded when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1295
/* Store (or update) an SMP key (STK or LTK) for bdaddr/addr_type.
 * When new_key is set and the key is an LTK, notify mgmt so it can be
 * persisted. Returns 0 on success (including silently ignoring
 * unsupported key types) or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only short term and long term keys are handled here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are ephemeral; only announce persistent LTKs to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1332
1333int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1334{
1335        struct link_key *key;
1336
1337        key = hci_find_link_key(hdev, bdaddr);
1338        if (!key)
1339                return -ENOENT;
1340
1341        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1342
1343        list_del(&key->list);
1344        kfree(key);
1345
1346        return 0;
1347}
1348
1349int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350{
1351        struct smp_ltk *k, *tmp;
1352
1353        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1354                if (bacmp(bdaddr, &k->bdaddr))
1355                        continue;
1356
1357                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1358
1359                list_del(&k->list);
1360                kfree(k);
1361        }
1362
1363        return 0;
1364}
1365
/* HCI command timer function: fires when the controller fails to
 * answer a command in time. Logs the stuck opcode and re-arms command
 * submission so the queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one new command and kick the command work queue */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1383
1384struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1385                                          bdaddr_t *bdaddr)
1386{
1387        struct oob_data *data;
1388
1389        list_for_each_entry(data, &hdev->remote_oob_data, list)
1390                if (bacmp(bdaddr, &data->bdaddr) == 0)
1391                        return data;
1392
1393        return NULL;
1394}
1395
1396int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397{
1398        struct oob_data *data;
1399
1400        data = hci_find_remote_oob_data(hdev, bdaddr);
1401        if (!data)
1402                return -ENOENT;
1403
1404        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1405
1406        list_del(&data->list);
1407        kfree(data);
1408
1409        return 0;
1410}
1411
1412int hci_remote_oob_data_clear(struct hci_dev *hdev)
1413{
1414        struct oob_data *data, *n;
1415
1416        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1417                list_del(&data->list);
1418                kfree(data);
1419        }
1420
1421        return 0;
1422}
1423
/* Store (or refresh) remote OOB pairing data (hash + randomizer) for
 * bdaddr. Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* Allocate a fresh entry if this address has none yet; every
	 * field is assigned below so kmalloc (not kzalloc) suffices. */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1447
1448struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1449{
1450        struct bdaddr_list *b;
1451
1452        list_for_each_entry(b, &hdev->blacklist, list)
1453                if (bacmp(bdaddr, &b->bdaddr) == 0)
1454                        return b;
1455
1456        return NULL;
1457}
1458
1459int hci_blacklist_clear(struct hci_dev *hdev)
1460{
1461        struct list_head *p, *n;
1462
1463        list_for_each_safe(p, n, &hdev->blacklist) {
1464                struct bdaddr_list *b;
1465
1466                b = list_entry(p, struct bdaddr_list, list);
1467
1468                list_del(p);
1469                kfree(b);
1470        }
1471
1472        return 0;
1473}
1474
1475int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1476{
1477        struct bdaddr_list *entry;
1478
1479        if (bacmp(bdaddr, BDADDR_ANY) == 0)
1480                return -EBADF;
1481
1482        if (hci_blacklist_lookup(hdev, bdaddr))
1483                return -EEXIST;
1484
1485        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1486        if (!entry)
1487                return -ENOMEM;
1488
1489        bacpy(&entry->bdaddr, bdaddr);
1490
1491        list_add(&entry->list, &hdev->blacklist);
1492
1493        return mgmt_device_blocked(hdev, bdaddr, type);
1494}
1495
1496int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1497{
1498        struct bdaddr_list *entry;
1499
1500        if (bacmp(bdaddr, BDADDR_ANY) == 0)
1501                return hci_blacklist_clear(hdev);
1502
1503        entry = hci_blacklist_lookup(hdev, bdaddr);
1504        if (!entry)
1505                return -ENOENT;
1506
1507        list_del(&entry->list);
1508        kfree(entry);
1509
1510        return mgmt_device_unblocked(hdev, bdaddr, type);
1511}
1512
1513static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1514{
1515        struct le_scan_params *param =  (struct le_scan_params *) opt;
1516        struct hci_cp_le_set_scan_param cp;
1517
1518        memset(&cp, 0, sizeof(cp));
1519        cp.type = param->type;
1520        cp.interval = cpu_to_le16(param->interval);
1521        cp.window = cpu_to_le16(param->window);
1522
1523        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1524}
1525
1526static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1527{
1528        struct hci_cp_le_set_scan_enable cp;
1529
1530        memset(&cp, 0, sizeof(cp));
1531        cp.enable = 1;
1532        cp.filter_dup = 1;
1533
1534        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1535}
1536
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, then arm delayed work to stop it after timeout ms.
 * Returns 0 on success, -EINPROGRESS if a scan is already running, or
 * the error from the HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-request completion timeout for the two commands below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Automatically disable the scan once the requested window ends */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1570
/* Cancel a running LE scan. Returns 0 on success or -EALREADY if no
 * scan is active.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	/* Only send the disable command if we beat the delayed work to
	 * it; otherwise le_scan_disable_work will do (or has done) it. */
	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1588
/* Delayed work armed by hci_do_le_scan(): disable the LE scan by
 * sending Set Scan Enable with all parameters zeroed.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	/* enable = 0 turns scanning off */
	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1601
1602static void le_scan_work(struct work_struct *work)
1603{
1604        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1605        struct le_scan_params *param = &hdev->le_scan_params;
1606
1607        BT_DBG("%s", hdev->name);
1608
1609        hci_do_le_scan(hdev, param->type, param->interval, param->window,
1610                       param->timeout);
1611}
1612
/* Asynchronously start an LE scan: stash the parameters on hdev and
 * queue le_scan_work to perform the blocking HCI requests. Returns 0
 * on success or -EINPROGRESS if a scan request is already queued or
 * running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* The scan blocks waiting for command completion, so run it on
	 * the long-running system workqueue. */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1632
/* Alloc HCI device: allocate and initialize a struct hci_dev with
 * default parameters, locks, lists, work items and queues. Returns
 * NULL on allocation failure. The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline capabilities until the controller reports its own */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Sniff interval bounds in baseband slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1686
/* Free HCI device: drop the last reference on a device allocated with
 * hci_alloc_dev(). The memory itself is released by the embedded
 * device's release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1696
/* Register HCI device: assign an index, create the workqueue, sysfs
 * entries and rfkill switch, then schedule the initial power-on.
 * Returns the new device id (>= 0) on success or a negative errno;
 * on failure all partially created resources are torn back down.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* The driver must provide open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high priority: RX/TX/command work must not
	 * run concurrently for one device. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	schedule_work(&hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Unwind the id allocation and list insertion from above */
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1775
/* Unregister HCI device: remove it from the global list, shut it
 * down, cancel pending work, tear down mgmt/sysfs/rfkill state, free
 * all stored keys and lists, and finally release its index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stop new activity from starting while we tear down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1833
/* Suspend HCI device: notify interested subsystems. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1841
/* Resume HCI device: notify interested subsystems. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1849
/* Receive frame from HCI drivers: queue a complete packet (skb->dev
 * must point at the hci_dev) onto the RX queue for deferred
 * processing. Consumes the skb. Returns 0 on success or -ENXIO if the
 * device is neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1872
/* Reassemble an HCI packet from driver-supplied fragments.
 *
 * Accumulates up to `count` bytes of `data` into the partial skb kept
 * in hdev->reassembly[index], creating it on the first fragment. Once
 * the packet header has arrived, the expected payload length is read
 * from it; when the packet is complete it is handed to
 * hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a
 * negative errno (-EILSEQ for a bad type/index, -ENOMEM on
 * allocation failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	/* No partial packet yet: allocate one sized for the worst case
	 * of this packet type, expecting the header first. */
	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length
		 * from it and sanity-check it against the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1981
1982int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1983{
1984        int rem = 0;
1985
1986        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1987                return -EILSEQ;
1988
1989        while (count) {
1990                rem = hci_reassembly(hdev, type, data, count, type - 1);
1991                if (rem < 0)
1992                        return rem;
1993
1994                data += (count - rem);
1995                count = rem;
1996        }
1997
1998        return rem;
1999}
2000EXPORT_SYMBOL(hci_recv_fragment);
2001
2002#define STREAM_REASSEMBLY 0
2003
2004int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2005{
2006        int type;
2007        int rem = 0;
2008
2009        while (count) {
2010                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2011
2012                if (!skb) {
2013                        struct { char type; } *pkt;
2014
2015                        /* Start of the frame */
2016                        pkt = data;
2017                        type = pkt->type;
2018
2019                        data++;
2020                        count--;
2021                } else
2022                        type = bt_cb(skb)->pkt_type;
2023
2024                rem = hci_reassembly(hdev, type, data, count,
2025                                     STREAM_REASSEMBLY);
2026                if (rem < 0)
2027                        return rem;
2028
2029                data += (count - rem);
2030                count = rem;
2031        }
2032
2033        return rem;
2034}
2035EXPORT_SYMBOL(hci_recv_stream_fragment);
2036
2037/* ---- Interface to upper protocols ---- */
2038
/* Register an upper-layer callback set (e.g. L2CAP, SCO) on the global
 * hci_cb_list so it receives notifications for all HCI devices.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2050
/* Remove a previously registered callback set from the global
 * hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2062
2063static int hci_send_frame(struct sk_buff *skb)
2064{
2065        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2066
2067        if (!hdev) {
2068                kfree_skb(skb);
2069                return -ENODEV;
2070        }
2071
2072        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2073
2074        /* Time stamp */
2075        __net_timestamp(skb);
2076
2077        /* Send copy to monitor */
2078        hci_send_to_monitor(hdev, skb);
2079
2080        if (atomic_read(&hdev->promisc)) {
2081                /* Send copy to the sockets */
2082                hci_send_to_sock(hdev, skb);
2083        }
2084
2085        /* Get rid of skb owner, prior to sending to the driver. */
2086        skb_orphan(skb);
2087
2088        return hdev->send(skb);
2089}
2090
2091/* Send HCI command */
2092int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2093{
2094        int len = HCI_COMMAND_HDR_SIZE + plen;
2095        struct hci_command_hdr *hdr;
2096        struct sk_buff *skb;
2097
2098        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2099
2100        skb = bt_skb_alloc(len, GFP_ATOMIC);
2101        if (!skb) {
2102                BT_ERR("%s no memory for command", hdev->name);
2103                return -ENOMEM;
2104        }
2105
2106        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2107        hdr->opcode = cpu_to_le16(opcode);
2108        hdr->plen   = plen;
2109
2110        if (plen)
2111                memcpy(skb_put(skb, plen), param, plen);
2112
2113        BT_DBG("skb len %d", skb->len);
2114
2115        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2116        skb->dev = (void *) hdev;
2117
2118        if (test_bit(HCI_INIT, &hdev->flags))
2119                hdev->init_last_cmd = opcode;
2120
2121        skb_queue_tail(&hdev->cmd_q, skb);
2122        queue_work(hdev->workqueue, &hdev->cmd_work);
2123
2124        return 0;
2125}
2126
2127/* Get data from the previously sent command */
2128void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2129{
2130        struct hci_command_hdr *hdr;
2131
2132        if (!hdev->sent_cmd)
2133                return NULL;
2134
2135        hdr = (void *) hdev->sent_cmd->data;
2136
2137        if (hdr->opcode != cpu_to_le16(opcode))
2138                return NULL;
2139
2140        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2141
2142        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2143}
2144
2145/* Send ACL data */
2146static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2147{
2148        struct hci_acl_hdr *hdr;
2149        int len = skb->len;
2150
2151        skb_push(skb, HCI_ACL_HDR_SIZE);
2152        skb_reset_transport_header(skb);
2153        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2154        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2155        hdr->dlen   = cpu_to_le16(len);
2156}
2157
/* Attach ACL headers to @skb and every fragment chained on its
 * frag_list, then queue them all on @queue for transmission on @conn.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb keeps only its linear data; the rest of the
	 * payload lives in the skbs chained on frag_list below */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment is queued separately */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2204
/* Queue ACL data on @chan and kick the TX work to transmit it. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	/* hci_send_frame() recovers the device from skb->dev */
	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2218
2219/* Send SCO data */
2220void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2221{
2222        struct hci_dev *hdev = conn->hdev;
2223        struct hci_sco_hdr hdr;
2224
2225        BT_DBG("%s len %d", hdev->name, skb->len);
2226
2227        hdr.handle = cpu_to_le16(conn->handle);
2228        hdr.dlen   = skb->len;
2229
2230        skb_push(skb, HCI_SCO_HDR_SIZE);
2231        skb_reset_transport_header(skb);
2232        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2233
2234        skb->dev = (void *) hdev;
2235        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2236
2237        skb_queue_tail(&conn->data_q, skb);
2238        queue_work(hdev->workqueue, &hdev->tx_work);
2239}
2240
2241/* ---- HCI TX task (outgoing data) ---- */
2242
/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest outstanding packets and
 * compute its fair share (@quote) of the free controller buffers.
 * Returns NULL (and *quote = 0) when nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins, for fairness */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Every connection of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE pool, LE shares the ACL
			 * buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the buffers among contenders; at least one each */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2303
2304static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2305{
2306        struct hci_conn_hash *h = &hdev->conn_hash;
2307        struct hci_conn *c;
2308
2309        BT_ERR("%s link tx timeout", hdev->name);
2310
2311        rcu_read_lock();
2312
2313        /* Kill stalled connections */
2314        list_for_each_entry_rcu(c, &h->list, list) {
2315                if (c->type == type && c->sent) {
2316                        BT_ERR("%s killing stalled connection %s",
2317                               hdev->name, batostr(&c->dst));
2318                        hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2319                }
2320        }
2321
2322        rcu_read_unlock();
2323}
2324
/* Channel scheduler: among all channels on connections of @type, pick
 * one whose head packet has the highest priority, breaking ties by the
 * fewest packets outstanding on the owning connection, and compute its
 * fair share (@quote) of the free controller buffers.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins the tie */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Every connection of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Without a dedicated LE pool, LE shares the ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the buffers among contenders; at least one each */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2403
/* Priority aging: after a TX round, promote the head packet of every
 * channel that sent nothing to just below the maximum priority, so
 * lower-priority channels cannot be starved indefinitely.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to transmit this round: reset its
			 * counter and leave its priority alone */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Every connection of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2453
2454static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2455{
2456        /* Calculate count of blocks used by this packet */
2457        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2458}
2459
2460static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2461{
2462        if (!test_bit(HCI_RAW, &hdev->flags)) {
2463                /* ACL tx timeout must be longer than maximum
2464                 * link supervision timeout (40.9 seconds) */
2465                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2466                                       HCI_ACL_TX_TIMEOUT))
2467                        hci_link_tx_to(hdev, ACL_LINK);
2468        }
2469}
2470
/* Packet-based ACL scheduling: drain channel queues while the
 * controller still reports free ACL packet buffers (acl_cnt).
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before (possibly) sending more */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: age the priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2508
2509static void hci_sched_acl_blk(struct hci_dev *hdev)
2510{
2511        unsigned int cnt = hdev->block_cnt;
2512        struct hci_chan *chan;
2513        struct sk_buff *skb;
2514        int quote;
2515
2516        __check_timeout(hdev, cnt);
2517
2518        while (hdev->block_cnt > 0 &&
2519               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2520                u32 priority = (skb_peek(&chan->data_q))->priority;
2521                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2522                        int blocks;
2523
2524                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2525                               skb->len, skb->priority);
2526
2527                        /* Stop if priority has changed */
2528                        if (skb->priority < priority)
2529                                break;
2530
2531                        skb = skb_dequeue(&chan->data_q);
2532
2533                        blocks = __get_blocks(hdev, skb);
2534                        if (blocks > hdev->block_cnt)
2535                                return;
2536
2537                        hci_conn_enter_active_mode(chan->conn,
2538                                                   bt_cb(skb)->force_active);
2539
2540                        hci_send_frame(skb);
2541                        hdev->acl_last_tx = jiffies;
2542
2543                        hdev->block_cnt -= blocks;
2544                        quote -= blocks;
2545
2546                        chan->sent += blocks;
2547                        chan->conn->sent += blocks;
2548                }
2549        }
2550
2551        if (cnt != hdev->block_cnt)
2552                hci_prio_recalculate(hdev, ACL_LINK);
2553}
2554
2555static void hci_sched_acl(struct hci_dev *hdev)
2556{
2557        BT_DBG("%s", hdev->name);
2558
2559        if (!hci_conn_num(hdev, ACL_LINK))
2560                return;
2561
2562        switch (hdev->flow_ctl_mode) {
2563        case HCI_FLOW_CTL_MODE_PACKET_BASED:
2564                hci_sched_acl_pkt(hdev);
2565                break;
2566
2567        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2568                hci_sched_acl_blk(hdev);
2569                break;
2570        }
2571}
2572
2573/* Schedule SCO */
2574static void hci_sched_sco(struct hci_dev *hdev)
2575{
2576        struct hci_conn *conn;
2577        struct sk_buff *skb;
2578        int quote;
2579
2580        BT_DBG("%s", hdev->name);
2581
2582        if (!hci_conn_num(hdev, SCO_LINK))
2583                return;
2584
2585        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2586                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2587                        BT_DBG("skb %p len %d", skb, skb->len);
2588                        hci_send_frame(skb);
2589
2590                        conn->sent++;
2591                        if (conn->sent == ~0)
2592                                conn->sent = 0;
2593                }
2594        }
2595}
2596
2597static void hci_sched_esco(struct hci_dev *hdev)
2598{
2599        struct hci_conn *conn;
2600        struct sk_buff *skb;
2601        int quote;
2602
2603        BT_DBG("%s", hdev->name);
2604
2605        if (!hci_conn_num(hdev, ESCO_LINK))
2606                return;
2607
2608        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2609                                                     &quote))) {
2610                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2611                        BT_DBG("skb %p len %d", skb, skb->len);
2612                        hci_send_frame(skb);
2613
2614                        conn->sent++;
2615                        if (conn->sent == ~0)
2616                                conn->sent = 0;
2617                }
2618        }
2619}
2620
/* LE scheduling: LE uses its own buffer pool (le_cnt) when the
 * controller advertises one (le_pkts non-zero), otherwise it shares the
 * ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Work on a local credit count; written back to the right pool
	 * once the loop is done */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credit to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: age the priorities of starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2671
/* TX work: run the per-link-type schedulers, then flush any raw
 * (unknown type) packets queued by user space.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2694
2695/* ----- HCI RX task (incoming data processing) ----- */
2696
2697/* ACL data packet */
2698static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2699{
2700        struct hci_acl_hdr *hdr = (void *) skb->data;
2701        struct hci_conn *conn;
2702        __u16 handle, flags;
2703
2704        skb_pull(skb, HCI_ACL_HDR_SIZE);
2705
2706        handle = __le16_to_cpu(hdr->handle);
2707        flags  = hci_flags(handle);
2708        handle = hci_handle(handle);
2709
2710        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2711               handle, flags);
2712
2713        hdev->stat.acl_rx++;
2714
2715        hci_dev_lock(hdev);
2716        conn = hci_conn_hash_lookup_handle(hdev, handle);
2717        hci_dev_unlock(hdev);
2718
2719        if (conn) {
2720                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2721
2722                hci_dev_lock(hdev);
2723                if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2724                    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2725                        mgmt_device_connected(hdev, &conn->dst, conn->type,
2726                                              conn->dst_type, 0, NULL, 0,
2727                                              conn->dev_class);
2728                hci_dev_unlock(hdev);
2729
2730                /* Send to upper protocol */
2731                l2cap_recv_acldata(conn, skb, flags);
2732                return;
2733        } else {
2734                BT_ERR("%s ACL packet for unknown connection handle %d",
2735                       hdev->name, handle);
2736        }
2737
2738        kfree_skb(skb);
2739}
2740
2741/* SCO data packet */
2742static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743{
2744        struct hci_sco_hdr *hdr = (void *) skb->data;
2745        struct hci_conn *conn;
2746        __u16 handle;
2747
2748        skb_pull(skb, HCI_SCO_HDR_SIZE);
2749
2750        handle = __le16_to_cpu(hdr->handle);
2751
2752        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2753
2754        hdev->stat.sco_rx++;
2755
2756        hci_dev_lock(hdev);
2757        conn = hci_conn_hash_lookup_handle(hdev, handle);
2758        hci_dev_unlock(hdev);
2759
2760        if (conn) {
2761                /* Send to upper protocol */
2762                sco_recv_scodata(conn, skb);
2763                return;
2764        } else {
2765                BT_ERR("%s SCO packet for unknown connection handle %d",
2766                       hdev->name, handle);
2767        }
2768
2769        kfree_skb(skb);
2770}
2771
/* RX work: drain the receive queue, mirroring every frame to the
 * monitor channel and (in promiscuous mode) raw sockets, then
 * dispatching it by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode user space handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown type: nobody to hand it to */
			kfree_skb(skb);
			break;
		}
	}
}
2826
/* Command work: send the next queued HCI command when the controller
 * has credit (cmd_cnt), keep a clone in sent_cmd so reply handlers can
 * inspect the sent parameters, and arm the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the copy of the previously sent command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* HCI_Reset gets no command timeout; every other
			 * command must complete within HCI_CMD_TIMEOUT */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2858
2859int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2860{
2861        /* General inquiry access code (GIAC) */
2862        u8 lap[3] = { 0x33, 0x8b, 0x9e };
2863        struct hci_cp_inquiry cp;
2864
2865        BT_DBG("%s", hdev->name);
2866
2867        if (test_bit(HCI_INQUIRY, &hdev->flags))
2868                return -EINPROGRESS;
2869
2870        inquiry_cache_flush(hdev);
2871
2872        memset(&cp, 0, sizeof(cp));
2873        memcpy(&cp.lap, lap, sizeof(cp.lap));
2874        cp.length  = length;
2875
2876        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2877}
2878
/* Abort an ongoing inquiry; returns -EALREADY when none is running. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
2888
2889u8 bdaddr_to_le(u8 bdaddr_type)
2890{
2891        switch (bdaddr_type) {
2892        case BDADDR_LE_PUBLIC:
2893                return ADDR_LE_DEV_PUBLIC;
2894
2895        default:
2896                /* Fallback to LE Random address type */
2897                return ADDR_LE_DEV_RANDOM;
2898        }
2899}
2900
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.