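/* Bluetooth HCI core. */
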
#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID numbering */
static DEFINE_IDA(hci_index_ida);
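
/* ---- HCI notifications ---- */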
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
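
/* ---- HCI requests ---- */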
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
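
/* Get HCI device by index.
 * Device is held on return. */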
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
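
/* ---- Inquiry support ---- */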
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir,
				  timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
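
/* ---- HCI ioctl helpers ---- */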
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	 * enable_hs is not set.
	 */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
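
/* ---- Interface to HCI drivers ---- */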
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
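
/* HCI command timer function */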
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
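
/* Alloc HCI device */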
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
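
/* Free HCI device */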
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
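
/* Register HCI device. A minimal sketch of typical driver usage
 * (callback names hypothetical):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */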
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
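
/* Unregister HCI device */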
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
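
/* Suspend HCI device */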
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
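
/* Resume HCI device */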
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
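
/* Receive frame from HCI drivers. A driver typically sets skb->dev
 * and the packet type before calling this (sketch, not taken from
 * any particular driver):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */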
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
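
/* ---- Interface to upper protocols ---- */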
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
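
/* Send HCI command. Example usage (sketch, mirroring hci_scan_req
 * above): a Write Scan Enable command with a one byte parameter:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */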
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
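
/* Get data from the previously sent command */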
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
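
/* Send ACL data */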
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
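
/* Send SCO data */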
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
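
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */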
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2548
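/* Schedule ACL traffic using whichever flow control mode the
 * controller operates in.
 */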
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

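/* Schedule SCO: simple round-robin over SCO connections, one frame per
 * unit of quote; the per-connection sent counter wraps to zero at ~0.
 */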
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

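/* Schedule eSCO: identical to the SCO case but for ESCO_LINK handles;
 * both draw from the shared sco_cnt credit pool.
 */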
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

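/* Schedule LE: mirrors the packet-based ACL loop. Controllers without
 * dedicated LE buffers (le_pkts == 0) share the ACL credit pool, so the
 * loop runs on a local count and writes it back to whichever pool the
 * credits came from.
 */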
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

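/* TX work: runs off hdev->workqueue whenever there may be something to
 * send; services each link-type scheduler in turn and finally flushes
 * any raw packets queued outside the flow-controlled paths.
 */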
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
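/* The first 16-bit field of an incoming ACL frame packs a 12-bit
 * connection handle with 4 flag bits (packet boundary and broadcast);
 * hci_handle() and hci_flags() split them apart so the payload can be
 * routed to the owning hci_conn and handed up to L2CAP.
 */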
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

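/* RX work: drains hdev->rx_q, mirroring every frame to the monitor and,
 * in promiscuous mode, to raw sockets, then demultiplexes it by packet
 * type to the event, ACL or SCO handlers.
 */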
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

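/* CMD work: while the controller advertises command credit (cmd_cnt),
 * send the next queued command, keeping a clone in hdev->sent_cmd so
 * the response path can match or resend it, and arm cmd_timer to catch
 * controllers that never answer.
 */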
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

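/* Kick off a general inquiry: flush the inquiry cache and issue
 * HCI_OP_INQUIRY with the GIAC (0x9e8b33) and the requested length in
 * units of 1.28 seconds.
 */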
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

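/* Abort a running inquiry; returns -EALREADY if none is in progress. */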
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

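/* Map an exported bdaddr type onto the corresponding HCI LE address
 * type, falling back to random for anything not explicitly public.
 */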
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
