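/* Bluetooth HCI core. */
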
#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID numbering */
static DEFINE_IDA(hci_index_ida);
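
/* ---- HCI notifications ---- */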

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
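
/* ---- HCI requests ---- */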

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check if the completed command
	 * matches the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
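
/* Execute request and wait for completion. */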
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
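
/* Get HCI device by index.
 * Device is held on return. */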
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
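
/* ---- Inquiry support ---- */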

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
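
/* Assemble the LE advertising data: AD flags, TX power (when valid)
 * and the local name, shortened if it does not fit HCI_MAX_AD_LENGTH.
 */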
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
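
/* ---- HCI ioctl helpers ---- */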

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	 * enable_hs is not set.
	 */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
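
/* ---- Interface to HCI drivers ---- */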

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
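
/* HCI command timer function */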
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
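
/* Alloc HCI device */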
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
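
/* Free HCI device */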
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
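
/* Register HCI device */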
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0;
	 * the first AMP controller gets index 1 or higher.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	schedule_work(&hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
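
/* Unregister HCI device */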
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
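
/* Suspend HCI device */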
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
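
/* Resume HCI device */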
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
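
/* Receive frame from HCI drivers */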
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
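
/* ---- Interface to upper protocols ---- */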

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
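
/* Send HCI command */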
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
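
/* Get data from the previously sent command */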
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
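
/* Send ACL data */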
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
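
/* Send SCO data */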
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
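
/* ---- HCI TX task ---- */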

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
2548
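/* Packet-based ACL scheduling: as long as the controller reports free
 * packet slots, drain the currently highest-priority channel up to
 * its fair quote, then re-run the channel selection. A drop in the
 * priority of the head-of-queue packet ends the channel's turn early.
 */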
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

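/* Block-based ACL scheduling: the same priority-ordered draining as
 * the packet-based variant, except that budgets and per-channel
 * quotes are accounted in controller buffer blocks and a single frame
 * may consume several of them. On an AMP controller the traffic runs
 * over AMP_LINK connections instead.
 */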
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Check the block budget before dequeueing so a
			 * frame that no longer fits stays queued instead
			 * of being lost.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

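/* Entry point for ACL scheduling: bail out early when there are no
 * matching connections, then pick the scheduler that matches the
 * controller's flow control mode.
 */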
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

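/* Schedule eSCO: same round-robin draining as SCO above; eSCO links
 * draw from the same sco_cnt buffer budget under their own link type.
 */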
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

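/* Schedule LE data. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffers, so the remaining budget is
 * written back to le_cnt or acl_cnt as appropriate. An LE-specific
 * stall check replaces __check_timeout() here.
 */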
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

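/* TX work handler: run every scheduler (ACL, SCO, eSCO, LE) and then
 * flush any queued raw packets straight to the driver.
 */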
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);	/* PB/BC flags from the top 4 bits */
	handle = hci_handle(handle);	/* 12-bit connection handle */

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

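/* RX work handler: drain rx_q, mirroring every frame to the monitor
 * socket and, in promiscuous mode, to raw sockets, then dispatch it
 * to the event, ACL or SCO handler. Raw-mode devices get the copies
 * only, and data packets are dropped while HCI_INIT is set.
 */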
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

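/* Command work handler: when the controller has room for a command
 * (cmd_cnt > 0), send the next queued one, keeping a clone in
 * sent_cmd so the completion event can be matched against it. The
 * command timer is rearmed for each command except during reset; if
 * cloning fails the command is requeued and the work rescheduled.
 */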
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

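/* Start an inquiry using the General Inquiry Access Code. The length
 * parameter is the inquiry duration in units of 1.28 seconds; any
 * cached results from earlier inquiries are flushed first.
 */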
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

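/* Abort a running inquiry; returns -EALREADY if none is active. */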
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

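/* Map an exported BDADDR_LE_* address type to the ADDR_LE_DEV_*
 * value used internally for LE connections; unknown values fall back
 * to the random address type.
 */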
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
