/* BlueZ - Bluetooth protocol stack for Linux */

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

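/* The "dut_mode" attribute exposes Device Under Test mode, used for RF
 * qualification testing. Reads report 'Y'/'N'; writing a boolean sends
 * HCI_OP_ENABLE_DUT_MODE, while disabling falls back to a full HCI reset
 * since there is no dedicated "disable DUT" command.
 */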
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no
	 * need for the vendor callback. Instead just store the desired
	 * value and the setting will be programmed when the controller
	 * gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

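/* The *_req() helpers below do not talk to the controller directly; they
 * only queue HCI commands on a struct hci_request via hci_req_add(). The
 * queued commands are sent, and their completions awaited, by
 * hci_req_sync()/__hci_req_sync(), which serializes request execution
 * per hdev.
 */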
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it is placed conditionally in the second
	 * init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

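/* The HCI event mask is an 8-byte little-endian bitfield: bit n enables
 * event code n + 1, so events[0] bit 4 (0x10) is event 0x05, Disconnection
 * Complete, and so on per the Bluetooth Core specification. Only events
 * the host actually consumes are unmasked, and optional ones only when the
 * controller advertises the related feature or command.
 */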
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * event mask command.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to
	 * set any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh
					    * Complete
					    */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Peripheral Broadcast central role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
		events[2] |= 0x20;	/* CPB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Peripheral Broadcast peripheral role is
	 * supported enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CPB Receive */
		events[2] |= 0x04;	/* CPB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

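/* Stage 3 runs after the controller's supported commands and (LE) features
 * have been read back. hdev->commands[] mirrors the 64-octet "Supported
 * Commands" bitfield from the Core specification, so a test such as
 * hdev->commands[26] & 0x08 checks one specific optional command (here
 * octet 26 bit 3, LE Set Scan Enable) before relying on it.
 */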
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	if (hdev->commands[18] & 0x04 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports the LL Privacy feature, enable
		 * the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
			events[1] |= 0x02;	/* LE Enhanced Connection
						 * Complete
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey
						 * Complete
						 */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10;	/* LE Extended Advertising
						 * Report
						 */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02;	/* LE Advertising Set
						 * Terminated
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* The HCI spec forbids mixing of legacy and extended
			 * advertising commands, and READ_ADV_TX_POWER is a
			 * legacy command. So do not call it if extended adv
			 * is supported, otherwise the controller will return
			 * the error Command Disallowed.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[38] & 0x80) {
			/* Read LE Min/Max Tx Power */
			hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE Accept List Size */
			hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE Accept List */
			hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->commands[35] & 0x04) {
			__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

			/* Set RPA timeout */
			hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
				    &rpa_timeout);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * advertises incorrect information and map it to all zeroes - which
	 * means that checking the quirk flag is not required.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value
	 */
	if (hdev->commands[18] & 0x08 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

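/* Controller bring-up runs as up to four synchronous request stages:
 * init1 resets the controller and reads basic info, init2 acts on that
 * info (buffer sizes, SSP, inquiry mode), init3 programs the event masks
 * and LE state, and init4 applies the remaining optional settings. Each
 * stage must complete before the next one is built, because later stages
 * consult values (features, supported commands) returned by earlier ones.
 */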
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

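/* Re-insert the entry into the name-resolve list so that entries already
 * being resolved (NAME_PENDING) keep their position and the remainder
 * stay sorted by signal strength: a smaller abs(rssi) means a stronger
 * signal, so the closest devices get their names resolved first.
 */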
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

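/* HCIINQUIRY ioctl handler. If the inquiry cache is stale, empty, or a
 * flush was requested, a fresh inquiry is started and the handler waits
 * for the HCI_INQUIRY flag to clear. The timeout allows 2000 ms per
 * requested inquiry-length unit (the unit itself is 1.28 s), leaving
 * headroom over the controller's own limit. The results are then copied
 * from the inquiry cache back to userspace.
 */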
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware.
 * For example, the DTS could define 'local-bd-address' with zero BD
 * addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}

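/* hci_dev_do_open() powers on and initializes a controller under the
 * request-sync lock: driver open() -> optional vendor setup() ->
 * unconfigured init or the full __hci_init() sequence -> HCI_UP. Any
 * failure along the way unwinds by flushing work, purging the queues and
 * calling the driver's close().
 */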
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the device to be opened
		 * during the setup and config phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the firmware
				 * node property succeeds, then treat the
				 * address as valid even if the invalid
				 * BD_ADDR quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);
	aosp_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() is possible to awake new cmd_work
		 * it should be flushed first to avoid unexpected call of
		 * hci_cmd_work()
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

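/* Tear-down mirrors bring-up in reverse and is ordered carefully: cancel
 * timers and pending requests, flush RX/TX work, drain the workqueue
 * before taking hdev->lock (to avoid lockdep complaints from *_flush()),
 * flush caches and connections, optionally send a final HCI Reset, and
 * only then purge the queues and call the driver's close().
 */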
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);
	cancel_delayed_work(&hdev->ncmd_timer);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	aosp_do_close(hdev);
	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

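/* Called after a successful HCISETSCAN ioctl to keep the mgmt-visible
 * CONNECTABLE/DISCOVERABLE flags in sync with the raw scan mode: SCAN_PAGE
 * maps to connectable and SCAN_INQUIRY to discoverable. If either flag
 * actually changed, mgmt is notified of the new settings.
 */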
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

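/* rfkill hook: blocking the radio sets HCI_RFKILLED and powers the
 * controller down, unless it is still in setup/config. A device bound to
 * a user channel refuses the state change with -EBUSY since userspace
 * owns it exclusively.
 */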
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

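/* Resolving an RPA (resolvable private address) is a two-pass lookup:
 * first try a cheap match against the last RPA each IRK was seen with,
 * then fall back to the cryptographic check (one AES operation per key in
 * smp_irk_matches()) and cache the RPA on a hit so the next lookup takes
 * the fast path.
 */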
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2573
2574struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2575 bdaddr_t *bdaddr, u8 *val, u8 type,
2576 u8 pin_len, bool *persistent)
2577{
2578 struct link_key *key, *old_key;
2579 u8 old_key_type;
2580
2581 old_key = hci_find_link_key(hdev, bdaddr);
2582 if (old_key) {
2583 old_key_type = old_key->type;
2584 key = old_key;
2585 } else {
2586 old_key_type = conn ? conn->key_type : 0xff;
2587 key = kzalloc(sizeof(*key), GFP_KERNEL);
2588 if (!key)
2589 return NULL;
2590 list_add_rcu(&key->list, &hdev->link_keys);
2591 }
2592
2593 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2594
2595
2596
2597
2598 if (type == HCI_LK_CHANGED_COMBINATION &&
2599 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2600 type = HCI_LK_COMBINATION;
2601 if (conn)
2602 conn->key_type = type;
2603 }
2604
2605 bacpy(&key->bdaddr, bdaddr);
2606 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2607 key->pin_len = pin_len;
2608
2609 if (type == HCI_LK_CHANGED_COMBINATION)
2610 key->type = old_key_type;
2611 else
2612 key->type = type;
2613
2614 if (persistent)
2615 *persistent = hci_persistent_key(hdev, conn, type,
2616 old_key_type);
2617
2618 return key;
2619}
2620
2621struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2622 u8 addr_type, u8 type, u8 authenticated,
2623 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2624{
2625 struct smp_ltk *key, *old_key;
2626 u8 role = ltk_role(type);
2627
2628 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2629 if (old_key)
2630 key = old_key;
2631 else {
2632 key = kzalloc(sizeof(*key), GFP_KERNEL);
2633 if (!key)
2634 return NULL;
2635 list_add_rcu(&key->list, &hdev->long_term_keys);
2636 }
2637
2638 bacpy(&key->bdaddr, bdaddr);
2639 key->bdaddr_type = addr_type;
2640 memcpy(key->val, tk, sizeof(key->val));
2641 key->authenticated = authenticated;
2642 key->ediv = ediv;
2643 key->rand = rand;
2644 key->enc_size = enc_size;
2645 key->type = type;
2646
2647 return key;
2648}
2649
2650struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2651 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2652{
2653 struct smp_irk *irk;
2654
2655 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2656 if (!irk) {
2657 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2658 if (!irk)
2659 return NULL;
2660
2661 bacpy(&irk->bdaddr, bdaddr);
2662 irk->addr_type = addr_type;
2663
2664 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2665 }
2666
2667 memcpy(irk->val, val, 16);
2668 bacpy(&irk->rpa, rpa);
2669
2670 return irk;
2671}
2672
2673int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2674{
2675 struct link_key *key;
2676
2677 key = hci_find_link_key(hdev, bdaddr);
2678 if (!key)
2679 return -ENOENT;
2680
2681 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2682
2683 list_del_rcu(&key->list);
2684 kfree_rcu(key, rcu);
2685
2686 return 0;
2687}
2688
2689int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2690{
2691 struct smp_ltk *k;
2692 int removed = 0;
2693
2694 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2695 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2696 continue;
2697
2698 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2699
2700 list_del_rcu(&k->list);
2701 kfree_rcu(k, rcu);
2702 removed++;
2703 }
2704
2705 return removed ? 0 : -ENOENT;
2706}
2707
2708void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2709{
2710 struct smp_irk *k;
2711
2712 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2713 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2714 continue;
2715
2716 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2717
2718 list_del_rcu(&k->list);
2719 kfree_rcu(k, rcu);
2720 }
2721}
2722
2723bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2724{
2725 struct smp_ltk *k;
2726 struct smp_irk *irk;
2727 u8 addr_type;
2728
2729 if (type == BDADDR_BREDR) {
2730 if (hci_find_link_key(hdev, bdaddr))
2731 return true;
2732 return false;
2733 }
2734
2735
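	/* Convert to HCI addr type which struct smp_ltk uses */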
2736 if (type == BDADDR_LE_PUBLIC)
2737 addr_type = ADDR_LE_DEV_PUBLIC;
2738 else
2739 addr_type = ADDR_LE_DEV_RANDOM;
2740
2741 irk = hci_get_irk(hdev, bdaddr, addr_type);
2742 if (irk) {
2743 bdaddr = &irk->bdaddr;
2744 addr_type = irk->addr_type;
2745 }
2746
2747 rcu_read_lock();
2748 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2749 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2750 rcu_read_unlock();
2751 return true;
2752 }
2753 }
2754 rcu_read_unlock();
2755
2756 return false;
2757}
2758
2759
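/* HCI command timer function */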
2760static void hci_cmd_timeout(struct work_struct *work)
2761{
2762 struct hci_dev *hdev = container_of(work, struct hci_dev,
2763 cmd_timer.work);
2764
2765 if (hdev->sent_cmd) {
2766 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2767 u16 opcode = __le16_to_cpu(sent->opcode);
2768
2769 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2770 } else {
2771 bt_dev_err(hdev, "command tx timeout");
2772 }
2773
2774 if (hdev->cmd_timeout)
2775 hdev->cmd_timeout(hdev);
2776
2777 atomic_set(&hdev->cmd_cnt, 1);
2778 queue_work(hdev->workqueue, &hdev->cmd_work);
2779}
2780
2781
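/* HCI ncmd timer function */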
2782static void hci_ncmd_timeout(struct work_struct *work)
2783{
2784 struct hci_dev *hdev = container_of(work, struct hci_dev,
2785 ncmd_timer.work);
2786
2787 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2788
2789
2790
2791
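	/* During the HCI_INIT phase the controller is still being brought
	 * up, so do not inject a reset from here and let the init code
	 * handle the failure instead.
	 */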
2792 if (test_bit(HCI_INIT, &hdev->flags))
2793 return;
2794
2795
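	/* This is an irrecoverable state, inject a hardware error event */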
2796 hci_reset_dev(hdev);
2797}
2798
2799struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2800 bdaddr_t *bdaddr, u8 bdaddr_type)
2801{
2802 struct oob_data *data;
2803
2804 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2805 if (bacmp(bdaddr, &data->bdaddr) != 0)
2806 continue;
2807 if (data->bdaddr_type != bdaddr_type)
2808 continue;
2809 return data;
2810 }
2811
2812 return NULL;
2813}
2814
2815int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2816 u8 bdaddr_type)
2817{
2818 struct oob_data *data;
2819
2820 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2821 if (!data)
2822 return -ENOENT;
2823
2824 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2825
2826 list_del(&data->list);
2827 kfree(data);
2828
2829 return 0;
2830}
2831
2832void hci_remote_oob_data_clear(struct hci_dev *hdev)
2833{
2834 struct oob_data *data, *n;
2835
2836 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2837 list_del(&data->list);
2838 kfree(data);
2839 }
2840}
2841
2842int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2843 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2844 u8 *hash256, u8 *rand256)
2845{
2846 struct oob_data *data;
2847
2848 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2849 if (!data) {
2850 data = kmalloc(sizeof(*data), GFP_KERNEL);
2851 if (!data)
2852 return -ENOMEM;
2853
2854 bacpy(&data->bdaddr, bdaddr);
2855 data->bdaddr_type = bdaddr_type;
2856 list_add(&data->list, &hdev->remote_oob_data);
2857 }
2858
2859 if (hash192 && rand192) {
2860 memcpy(data->hash192, hash192, sizeof(data->hash192));
2861 memcpy(data->rand192, rand192, sizeof(data->rand192));
2862 if (hash256 && rand256)
2863 data->present = 0x03;
2864 } else {
2865 memset(data->hash192, 0, sizeof(data->hash192));
2866 memset(data->rand192, 0, sizeof(data->rand192));
2867 if (hash256 && rand256)
2868 data->present = 0x02;
2869 else
2870 data->present = 0x00;
2871 }
2872
2873 if (hash256 && rand256) {
2874 memcpy(data->hash256, hash256, sizeof(data->hash256));
2875 memcpy(data->rand256, rand256, sizeof(data->rand256));
2876 } else {
2877 memset(data->hash256, 0, sizeof(data->hash256));
2878 memset(data->rand256, 0, sizeof(data->rand256));
2879 if (hash192 && rand192)
2880 data->present = 0x01;
2881 }
2882
2883 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2884
2885 return 0;
2886}
2887
2888
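/* This function requires the caller holds hdev->lock */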
2889struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2890{
2891 struct adv_info *adv_instance;
2892
2893 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2894 if (adv_instance->instance == instance)
2895 return adv_instance;
2896 }
2897
2898 return NULL;
2899}
2900
2901
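/* This function requires the caller holds hdev->lock */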
2902struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2903{
2904 struct adv_info *cur_instance;
2905
2906 cur_instance = hci_find_adv_instance(hdev, instance);
2907 if (!cur_instance)
2908 return NULL;
2909
2910 if (cur_instance == list_last_entry(&hdev->adv_instances,
2911 struct adv_info, list))
2912 return list_first_entry(&hdev->adv_instances,
2913 struct adv_info, list);
2914 else
2915 return list_next_entry(cur_instance, list);
2916}
2917
2918
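/* This function requires the caller holds hdev->lock */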
2919int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2920{
2921 struct adv_info *adv_instance;
2922
2923 adv_instance = hci_find_adv_instance(hdev, instance);
2924 if (!adv_instance)
2925 return -ENOENT;
2926
	BT_DBG("%s removing instance %d", hdev->name, instance);
2928
2929 if (hdev->cur_adv_instance == instance) {
2930 if (hdev->adv_instance_timeout) {
2931 cancel_delayed_work(&hdev->adv_instance_expire);
2932 hdev->adv_instance_timeout = 0;
2933 }
2934 hdev->cur_adv_instance = 0x00;
2935 }
2936
2937 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2938
2939 list_del(&adv_instance->list);
2940 kfree(adv_instance);
2941
2942 hdev->adv_instance_cnt--;
2943
2944 return 0;
2945}
2946
2947void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2948{
2949 struct adv_info *adv_instance, *n;
2950
2951 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2952 adv_instance->rpa_expired = rpa_expired;
2953}
2954
2955
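/* This function requires the caller holds hdev->lock */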
2956void hci_adv_instances_clear(struct hci_dev *hdev)
2957{
2958 struct adv_info *adv_instance, *n;
2959
2960 if (hdev->adv_instance_timeout) {
2961 cancel_delayed_work(&hdev->adv_instance_expire);
2962 hdev->adv_instance_timeout = 0;
2963 }
2964
2965 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2966 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2967 list_del(&adv_instance->list);
2968 kfree(adv_instance);
2969 }
2970
2971 hdev->adv_instance_cnt = 0;
2972 hdev->cur_adv_instance = 0x00;
2973}
2974
2975static void adv_instance_rpa_expired(struct work_struct *work)
2976{
2977 struct adv_info *adv_instance = container_of(work, struct adv_info,
2978 rpa_expired_cb.work);
2979
2980 BT_DBG("");
2981
2982 adv_instance->rpa_expired = true;
2983}
2984
2985
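/* This function requires the caller holds hdev->lock */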
2986int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2987 u16 adv_data_len, u8 *adv_data,
2988 u16 scan_rsp_len, u8 *scan_rsp_data,
2989 u16 timeout, u16 duration, s8 tx_power,
2990 u32 min_interval, u32 max_interval)
2991{
2992 struct adv_info *adv_instance;
2993
2994 adv_instance = hci_find_adv_instance(hdev, instance);
2995 if (adv_instance) {
2996 memset(adv_instance->adv_data, 0,
2997 sizeof(adv_instance->adv_data));
2998 memset(adv_instance->scan_rsp_data, 0,
2999 sizeof(adv_instance->scan_rsp_data));
3000 } else {
3001 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3002 instance < 1 || instance > hdev->le_num_of_adv_sets)
3003 return -EOVERFLOW;
3004
3005 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3006 if (!adv_instance)
3007 return -ENOMEM;
3008
3009 adv_instance->pending = true;
3010 adv_instance->instance = instance;
3011 list_add(&adv_instance->list, &hdev->adv_instances);
3012 hdev->adv_instance_cnt++;
3013 }
3014
3015 adv_instance->flags = flags;
3016 adv_instance->adv_data_len = adv_data_len;
3017 adv_instance->scan_rsp_len = scan_rsp_len;
3018 adv_instance->min_interval = min_interval;
3019 adv_instance->max_interval = max_interval;
3020 adv_instance->tx_power = tx_power;
3021
3022 if (adv_data_len)
3023 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3024
3025 if (scan_rsp_len)
3026 memcpy(adv_instance->scan_rsp_data,
3027 scan_rsp_data, scan_rsp_len);
3028
3029 adv_instance->timeout = timeout;
3030 adv_instance->remaining_time = timeout;
3031
3032 if (duration == 0)
3033 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3034 else
3035 adv_instance->duration = duration;
3036
3037 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3038 adv_instance_rpa_expired);
3039
	BT_DBG("%s for instance %d", hdev->name, instance);
3041
3042 return 0;
3043}
3044
3045
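/* This function requires the caller holds hdev->lock */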
3046int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3047 u16 adv_data_len, u8 *adv_data,
3048 u16 scan_rsp_len, u8 *scan_rsp_data)
3049{
3050 struct adv_info *adv_instance;
3051
3052 adv_instance = hci_find_adv_instance(hdev, instance);
3053
3054
3055 if (!adv_instance)
3056 return -ENOENT;
3057
3058 if (adv_data_len) {
3059 memset(adv_instance->adv_data, 0,
3060 sizeof(adv_instance->adv_data));
3061 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3062 adv_instance->adv_data_len = adv_data_len;
3063 }
3064
3065 if (scan_rsp_len) {
3066 memset(adv_instance->scan_rsp_data, 0,
3067 sizeof(adv_instance->scan_rsp_data));
3068 memcpy(adv_instance->scan_rsp_data,
3069 scan_rsp_data, scan_rsp_len);
3070 adv_instance->scan_rsp_len = scan_rsp_len;
3071 }
3072
3073 return 0;
3074}
3075
3076
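/* This function requires the caller holds hdev->lock */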
3077void hci_adv_monitors_clear(struct hci_dev *hdev)
3078{
3079 struct adv_monitor *monitor;
3080 int handle;
3081
3082 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3083 hci_free_adv_monitor(hdev, monitor);
3084
3085 idr_destroy(&hdev->adv_monitors_idr);
3086}
3087
3088
3089
3090
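/* Frees the monitor structure and does some basic cleanup if it has been
 * registered. This function requires the caller holds hdev->lock.
 */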
3091void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3092{
3093 struct adv_pattern *pattern;
3094 struct adv_pattern *tmp;
3095
3096 if (!monitor)
3097 return;
3098
3099 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3100 list_del(&pattern->list);
3101 kfree(pattern);
3102 }
3103
3104 if (monitor->handle)
3105 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3106
3107 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3108 hdev->adv_monitors_cnt--;
3109 mgmt_adv_monitor_removed(hdev, monitor->handle);
3110 }
3111
3112 kfree(monitor);
3113}
3114
3115int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3116{
3117 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3118}
3119
3120int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3121{
3122 return mgmt_remove_adv_monitor_complete(hdev, status);
3123}
3124
3125
3126
3127
3128
3129
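/* Assigns a handle to the monitor and, if offloading is supported and the
 * controller is powered, forwards the request to the controller.
 * Returns true if the request is forwarded (result is pending), false
 * otherwise. This function requires the caller holds hdev->lock.
 */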
3130bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3131 int *err)
3132{
3133 int min, max, handle;
3134
3135 *err = 0;
3136
3137 if (!monitor) {
3138 *err = -EINVAL;
3139 return false;
3140 }
3141
3142 min = HCI_MIN_ADV_MONITOR_HANDLE;
3143 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3144 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3145 GFP_KERNEL);
3146 if (handle < 0) {
3147 *err = handle;
3148 return false;
3149 }
3150
3151 monitor->handle = handle;
3152
3153 if (!hdev_is_powered(hdev))
3154 return false;
3155
3156 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3157 case HCI_ADV_MONITOR_EXT_NONE:
3158 hci_update_background_scan(hdev);
		bt_dev_dbg(hdev, "add monitor status %d", *err);
3160
3161 return false;
3162 case HCI_ADV_MONITOR_EXT_MSFT:
3163 *err = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor msft status %d", *err);
3166 break;
3167 }
3168
3169 return (*err == 0);
3170}
3171
3172
3173
3174
3175
3176
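/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove it anyway.
 * Returns true if the request is forwarded (result is pending), false
 * otherwise. This function requires the caller holds hdev->lock.
 */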
3177static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3178 struct adv_monitor *monitor,
3179 u16 handle, int *err)
3180{
3181 *err = 0;
3182
3183 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3184 case HCI_ADV_MONITOR_EXT_NONE:
3185 goto free_monitor;
3186 case HCI_ADV_MONITOR_EXT_MSFT:
3187 *err = msft_remove_monitor(hdev, monitor, handle);
3188 break;
3189 }
3190
3191
3192 if (*err == -ENOENT)
3193 goto free_monitor;
3194
3195 return (*err == 0);
3196
3197free_monitor:
3198 if (*err == -ENOENT)
3199 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3200 monitor->handle);
3201 hci_free_adv_monitor(hdev, monitor);
3202
3203 *err = 0;
3204 return false;
3205}
3206
3207
3208
3209
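/* Returns true if the request is forwarded (result is pending), false
 * otherwise. This function requires the caller holds hdev->lock.
 */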
3210bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3211{
3212 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3213 bool pending;
3214
3215 if (!monitor) {
3216 *err = -EINVAL;
3217 return false;
3218 }
3219
3220 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3221 if (!*err && !pending)
3222 hci_update_background_scan(hdev);
3223
	bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
		   handle, *err, pending ? "" : "not ");
3226
3227 return pending;
3228}
3229
3230
3231
3232
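/* Returns true if the request is forwarded (result is pending), false
 * otherwise. This function requires the caller holds hdev->lock.
 */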
3233bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3234{
3235 struct adv_monitor *monitor;
3236 int idr_next_id = 0;
3237 bool pending = false;
3238 bool update = false;
3239
3240 *err = 0;
3241
3242 while (!*err && !pending) {
3243 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3244 if (!monitor)
3245 break;
3246
3247 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3248
3249 if (!*err && !pending)
3250 update = true;
3251 }
3252
3253 if (update)
3254 hci_update_background_scan(hdev);
3255
	bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
		   *err, pending ? "" : "not ");
3258
3259 return pending;
3260}
3261
3262
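/* Returns true if at least one advertisement monitor is registered */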
3263bool hci_is_adv_monitoring(struct hci_dev *hdev)
3264{
3265 return !idr_is_empty(&hdev->adv_monitors_idr);
3266}
3267
3268int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3269{
3270 if (msft_monitor_supported(hdev))
3271 return HCI_ADV_MONITOR_EXT_MSFT;
3272
3273 return HCI_ADV_MONITOR_EXT_NONE;
3274}
3275
3276struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3277 bdaddr_t *bdaddr, u8 type)
3278{
3279 struct bdaddr_list *b;
3280
3281 list_for_each_entry(b, bdaddr_list, list) {
3282 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3283 return b;
3284 }
3285
3286 return NULL;
3287}
3288
3289struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3290 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3291 u8 type)
3292{
3293 struct bdaddr_list_with_irk *b;
3294
3295 list_for_each_entry(b, bdaddr_list, list) {
3296 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3297 return b;
3298 }
3299
3300 return NULL;
3301}
3302
3303struct bdaddr_list_with_flags *
3304hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3305 bdaddr_t *bdaddr, u8 type)
3306{
3307 struct bdaddr_list_with_flags *b;
3308
3309 list_for_each_entry(b, bdaddr_list, list) {
3310 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3311 return b;
3312 }
3313
3314 return NULL;
3315}
3316
3317void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3318{
3319 struct bdaddr_list *b, *n;
3320
3321 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3322 list_del(&b->list);
3323 kfree(b);
3324 }
3325}
3326
3327int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3328{
3329 struct bdaddr_list *entry;
3330
3331 if (!bacmp(bdaddr, BDADDR_ANY))
3332 return -EBADF;
3333
3334 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3335 return -EEXIST;
3336
3337 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3338 if (!entry)
3339 return -ENOMEM;
3340
3341 bacpy(&entry->bdaddr, bdaddr);
3342 entry->bdaddr_type = type;
3343
3344 list_add(&entry->list, list);
3345
3346 return 0;
3347}
3348
3349int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3350 u8 type, u8 *peer_irk, u8 *local_irk)
3351{
3352 struct bdaddr_list_with_irk *entry;
3353
3354 if (!bacmp(bdaddr, BDADDR_ANY))
3355 return -EBADF;
3356
3357 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3358 return -EEXIST;
3359
3360 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3361 if (!entry)
3362 return -ENOMEM;
3363
3364 bacpy(&entry->bdaddr, bdaddr);
3365 entry->bdaddr_type = type;
3366
3367 if (peer_irk)
3368 memcpy(entry->peer_irk, peer_irk, 16);
3369
3370 if (local_irk)
3371 memcpy(entry->local_irk, local_irk, 16);
3372
3373 list_add(&entry->list, list);
3374
3375 return 0;
3376}
3377
3378int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3379 u8 type, u32 flags)
3380{
3381 struct bdaddr_list_with_flags *entry;
3382
3383 if (!bacmp(bdaddr, BDADDR_ANY))
3384 return -EBADF;
3385
3386 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3387 return -EEXIST;
3388
3389 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3390 if (!entry)
3391 return -ENOMEM;
3392
3393 bacpy(&entry->bdaddr, bdaddr);
3394 entry->bdaddr_type = type;
3395 entry->current_flags = flags;
3396
3397 list_add(&entry->list, list);
3398
3399 return 0;
3400}
3401
3402int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3403{
3404 struct bdaddr_list *entry;
3405
3406 if (!bacmp(bdaddr, BDADDR_ANY)) {
3407 hci_bdaddr_list_clear(list);
3408 return 0;
3409 }
3410
3411 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3412 if (!entry)
3413 return -ENOENT;
3414
3415 list_del(&entry->list);
3416 kfree(entry);
3417
3418 return 0;
3419}
3420
3421int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3422 u8 type)
3423{
3424 struct bdaddr_list_with_irk *entry;
3425
3426 if (!bacmp(bdaddr, BDADDR_ANY)) {
3427 hci_bdaddr_list_clear(list);
3428 return 0;
3429 }
3430
3431 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3432 if (!entry)
3433 return -ENOENT;
3434
3435 list_del(&entry->list);
3436 kfree(entry);
3437
3438 return 0;
3439}
3440
3441int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3442 u8 type)
3443{
3444 struct bdaddr_list_with_flags *entry;
3445
3446 if (!bacmp(bdaddr, BDADDR_ANY)) {
3447 hci_bdaddr_list_clear(list);
3448 return 0;
3449 }
3450
3451 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3452 if (!entry)
3453 return -ENOENT;
3454
3455 list_del(&entry->list);
3456 kfree(entry);
3457
3458 return 0;
3459}
3460
3461
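/* This function requires the caller holds hdev->lock */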
3462struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3463 bdaddr_t *addr, u8 addr_type)
3464{
3465 struct hci_conn_params *params;
3466
3467 list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
3469 params->addr_type == addr_type) {
3470 return params;
3471 }
3472 }
3473
3474 return NULL;
3475}
3476
3477
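/* This function requires the caller holds hdev->lock */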
3478struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3479 bdaddr_t *addr, u8 addr_type)
3480{
3481 struct hci_conn_params *param;
3482
3483 switch (addr_type) {
3484 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3485 addr_type = ADDR_LE_DEV_PUBLIC;
3486 break;
3487 case ADDR_LE_DEV_RANDOM_RESOLVED:
3488 addr_type = ADDR_LE_DEV_RANDOM;
3489 break;
3490 }
3491
3492 list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
3494 param->addr_type == addr_type)
3495 return param;
3496 }
3497
3498 return NULL;
3499}
3500
3501
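/* This function requires the caller holds hdev->lock */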
3502struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3503 bdaddr_t *addr, u8 addr_type)
3504{
3505 struct hci_conn_params *params;
3506
3507 params = hci_conn_params_lookup(hdev, addr, addr_type);
3508 if (params)
3509 return params;
3510
3511 params = kzalloc(sizeof(*params), GFP_KERNEL);
3512 if (!params) {
3513 bt_dev_err(hdev, "out of memory");
3514 return NULL;
3515 }
3516
	bacpy(&params->addr, addr);
3518 params->addr_type = addr_type;
3519
	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);
3522
3523 params->conn_min_interval = hdev->le_conn_min_interval;
3524 params->conn_max_interval = hdev->le_conn_max_interval;
3525 params->conn_latency = hdev->le_conn_latency;
3526 params->supervision_timeout = hdev->le_supv_timeout;
3527 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3528
3529 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3530
3531 return params;
3532}
3533
3534static void hci_conn_params_free(struct hci_conn_params *params)
3535{
3536 if (params->conn) {
3537 hci_conn_drop(params->conn);
3538 hci_conn_put(params->conn);
3539 }
3540
	list_del(&params->action);
	list_del(&params->list);
3543 kfree(params);
3544}
3545
3546
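/* This function requires the caller holds hdev->lock */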
3547void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3548{
3549 struct hci_conn_params *params;
3550
3551 params = hci_conn_params_lookup(hdev, addr, addr_type);
3552 if (!params)
3553 return;
3554
3555 hci_conn_params_free(params);
3556
3557 hci_update_background_scan(hdev);
3558
3559 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3560}
3561
3562
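/* This function requires the caller holds hdev->lock */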
3563void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3564{
3565 struct hci_conn_params *params, *tmp;
3566
3567 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3568 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3569 continue;
3570
3571
3572
3573
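		/* If trying to establish a one-time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */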
3574 if (params->explicit_connect) {
3575 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3576 continue;
3577 }
3578
		list_del(&params->list);
3580 kfree(params);
3581 }
3582
3583 BT_DBG("All LE disabled connection parameters were removed");
3584}
3585
3586
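/* This function requires the caller holds hdev->lock */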
3587static void hci_conn_params_clear_all(struct hci_dev *hdev)
3588{
3589 struct hci_conn_params *params, *tmp;
3590
3591 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3592 hci_conn_params_free(params);
3593
3594 BT_DBG("All LE connection parameters were removed");
3595}
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
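/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */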
3610void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3611 u8 *bdaddr_type)
3612{
3613 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3614 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3615 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3616 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3617 bacpy(bdaddr, &hdev->static_addr);
3618 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3619 } else {
3620 bacpy(bdaddr, &hdev->bdaddr);
3621 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3622 }
3623}
3624
3625static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3626{
3627 int i;
3628
3629 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3630 clear_bit(i, hdev->suspend_tasks);
3631
3632 wake_up(&hdev->suspend_wait_q);
3633}
3634
3635static int hci_suspend_wait_event(struct hci_dev *hdev)
3636{
3637#define WAKE_COND \
3638 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3639 __SUSPEND_NUM_TASKS)
3640
3641 int i;
3642 int ret = wait_event_timeout(hdev->suspend_wait_q,
3643 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3644
3645 if (ret == 0) {
3646 bt_dev_err(hdev, "Timed out waiting for suspend events");
3647 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3648 if (test_bit(i, hdev->suspend_tasks))
3649 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3650 clear_bit(i, hdev->suspend_tasks);
3651 }
3652
3653 ret = -ETIMEDOUT;
3654 } else {
3655 ret = 0;
3656 }
3657
3658 return ret;
3659}
3660
3661static void hci_prepare_suspend(struct work_struct *work)
3662{
3663 struct hci_dev *hdev =
3664 container_of(work, struct hci_dev, suspend_prepare);
3665
3666 hci_dev_lock(hdev);
3667 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3668 hci_dev_unlock(hdev);
3669}
3670
3671static int hci_change_suspend_state(struct hci_dev *hdev,
3672 enum suspended_state next)
3673{
3674 hdev->suspend_state_next = next;
3675 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3676 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3677 return hci_suspend_wait_event(hdev);
3678}
3679
3680static void hci_clear_wake_reason(struct hci_dev *hdev)
3681{
3682 hci_dev_lock(hdev);
3683
3684 hdev->wake_reason = 0;
3685 bacpy(&hdev->wake_addr, BDADDR_ANY);
3686 hdev->wake_addr_type = 0;
3687
3688 hci_dev_unlock(hdev);
3689}
3690
3691static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3692 void *data)
3693{
3694 struct hci_dev *hdev =
3695 container_of(nb, struct hci_dev, suspend_notifier);
3696 int ret = 0;
3697 u8 state = BT_RUNNING;
3698
3699
3700 if (mgmt_powering_down(hdev)) {
3701 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3702 ret = hci_suspend_wait_event(hdev);
3703 if (ret)
3704 goto done;
3705 }
3706
3707
3708 if (!hdev_is_powered(hdev) ||
3709 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3710 goto done;
3711
3712 if (action == PM_SUSPEND_PREPARE) {
3713
3714
3715
3716
3717
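		/* Suspend consists of two actions:
		 *  - First, disconnect everything and make the controller not
		 *    connectable (disabling scanning).
		 *  - Second, program the event filter/accept list and enable
		 *    the configured wake scan.
		 */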
3718 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3719 if (!ret)
3720 state = BT_SUSPEND_DISCONNECT;
3721
3722
3723
3724
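		/* Only configure the wake scan if disconnect succeeded and
		 * wake isn't being prevented by the driver.
		 */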
3725 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3726 ret = hci_change_suspend_state(hdev,
3727 BT_SUSPEND_CONFIGURE_WAKE);
3728 if (!ret)
3729 state = BT_SUSPEND_CONFIGURE_WAKE;
3730 }
3731
3732 hci_clear_wake_reason(hdev);
3733 mgmt_suspending(hdev, state);
3734
3735 } else if (action == PM_POST_SUSPEND) {
3736 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3737
3738 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3739 hdev->wake_addr_type);
3740 }
3741
3742done:
3743
3744
3745
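	/* We always allow suspend even if suspend preparation failed and
	 * attempt to recover in resume.
	 */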
3746 if (ret)
3747 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3748 action, ret);
3749
3750 return NOTIFY_DONE;
3751}
3752
3753
3754struct hci_dev *hci_alloc_dev(void)
3755{
3756 struct hci_dev *hdev;
3757
3758 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3759 if (!hdev)
3760 return NULL;
3761
3762 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3763 hdev->esco_type = (ESCO_HV1);
3764 hdev->link_mode = (HCI_LM_ACCEPT);
3765 hdev->num_iac = 0x01;
3766 hdev->io_capability = 0x03;
3767 hdev->manufacturer = 0xffff;
3768 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3769 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3770 hdev->adv_instance_cnt = 0;
3771 hdev->cur_adv_instance = 0x00;
3772 hdev->adv_instance_timeout = 0;
3773
3774 hdev->advmon_allowlist_duration = 300;
3775 hdev->advmon_no_filter_duration = 500;
3776 hdev->enable_advmon_interleave_scan = 0x00;
3777
3778 hdev->sniff_max_interval = 800;
3779 hdev->sniff_min_interval = 80;
3780
3781 hdev->le_adv_channel_map = 0x07;
3782 hdev->le_adv_min_interval = 0x0800;
3783 hdev->le_adv_max_interval = 0x0800;
3784 hdev->le_scan_interval = 0x0060;
3785 hdev->le_scan_window = 0x0030;
3786 hdev->le_scan_int_suspend = 0x0400;
3787 hdev->le_scan_window_suspend = 0x0012;
3788 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3789 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3790 hdev->le_scan_int_adv_monitor = 0x0060;
3791 hdev->le_scan_window_adv_monitor = 0x0030;
3792 hdev->le_scan_int_connect = 0x0060;
3793 hdev->le_scan_window_connect = 0x0060;
3794 hdev->le_conn_min_interval = 0x0018;
3795 hdev->le_conn_max_interval = 0x0028;
3796 hdev->le_conn_latency = 0x0000;
3797 hdev->le_supv_timeout = 0x002a;
3798 hdev->le_def_tx_len = 0x001b;
3799 hdev->le_def_tx_time = 0x0148;
3800 hdev->le_max_tx_len = 0x001b;
3801 hdev->le_max_tx_time = 0x0148;
3802 hdev->le_max_rx_len = 0x001b;
3803 hdev->le_max_rx_time = 0x0148;
3804 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3805 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3806 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3807 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3808 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3809 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3810 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3811 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3812 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3813
3814 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3815 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3816 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3817 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3818 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3819 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3820
3821
3822 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3823 hdev->def_page_scan_int = 0x0800;
3824 hdev->def_page_scan_window = 0x0012;
3825
3826 mutex_init(&hdev->lock);
3827 mutex_init(&hdev->req_lock);
3828
3829 INIT_LIST_HEAD(&hdev->mgmt_pending);
3830 INIT_LIST_HEAD(&hdev->reject_list);
3831 INIT_LIST_HEAD(&hdev->accept_list);
3832 INIT_LIST_HEAD(&hdev->uuids);
3833 INIT_LIST_HEAD(&hdev->link_keys);
3834 INIT_LIST_HEAD(&hdev->long_term_keys);
3835 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3836 INIT_LIST_HEAD(&hdev->remote_oob_data);
3837 INIT_LIST_HEAD(&hdev->le_accept_list);
3838 INIT_LIST_HEAD(&hdev->le_resolv_list);
3839 INIT_LIST_HEAD(&hdev->le_conn_params);
3840 INIT_LIST_HEAD(&hdev->pend_le_conns);
3841 INIT_LIST_HEAD(&hdev->pend_le_reports);
3842 INIT_LIST_HEAD(&hdev->conn_hash.list);
3843 INIT_LIST_HEAD(&hdev->adv_instances);
3844 INIT_LIST_HEAD(&hdev->blocked_keys);
3845
3846 INIT_WORK(&hdev->rx_work, hci_rx_work);
3847 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3848 INIT_WORK(&hdev->tx_work, hci_tx_work);
3849 INIT_WORK(&hdev->power_on, hci_power_on);
3850 INIT_WORK(&hdev->error_reset, hci_error_reset);
3851 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3852
3853 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3854
3855 skb_queue_head_init(&hdev->rx_q);
3856 skb_queue_head_init(&hdev->cmd_q);
3857 skb_queue_head_init(&hdev->raw_q);
3858
3859 init_waitqueue_head(&hdev->req_wait_q);
3860 init_waitqueue_head(&hdev->suspend_wait_q);
3861
3862 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3863 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3864
3865 hci_request_setup(hdev);
3866
3867 hci_init_sysfs(hdev);
3868 discovery_init(hdev);
3869
3870 return hdev;
3871}
3872EXPORT_SYMBOL(hci_alloc_dev);
3873
3874
3875void hci_free_dev(struct hci_dev *hdev)
3876{
3877
3878 put_device(&hdev->dev);
3879}
3880EXPORT_SYMBOL(hci_free_dev);
3881
3882
3883int hci_register_dev(struct hci_dev *hdev)
3884{
3885 int id, error;
3886
3887 if (!hdev->open || !hdev->close || !hdev->send)
3888 return -EINVAL;
3889
3890
3891
3892
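	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */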
3893 switch (hdev->dev_type) {
3894 case HCI_PRIMARY:
3895 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3896 break;
3897 case HCI_AMP:
3898 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3899 break;
3900 default:
3901 return -EINVAL;
3902 }
3903
3904 if (id < 0)
3905 return id;
3906
	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3908 hdev->id = id;
3909
3910 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3911
3912 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3913 if (!hdev->workqueue) {
3914 error = -ENOMEM;
3915 goto err;
3916 }
3917
3918 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3919 hdev->name);
3920 if (!hdev->req_workqueue) {
3921 destroy_workqueue(hdev->workqueue);
3922 error = -ENOMEM;
3923 goto err;
3924 }
3925
3926 if (!IS_ERR_OR_NULL(bt_debugfs))
3927 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3928
3929 dev_set_name(&hdev->dev, "%s", hdev->name);
3930
3931 error = device_add(&hdev->dev);
3932 if (error < 0)
3933 goto err_wqueue;
3934
3935 hci_leds_init(hdev);
3936
3937 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3938 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3939 hdev);
3940 if (hdev->rfkill) {
3941 if (rfkill_register(hdev->rfkill) < 0) {
3942 rfkill_destroy(hdev->rfkill);
3943 hdev->rfkill = NULL;
3944 }
3945 }
3946
3947 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3948 hci_dev_set_flag(hdev, HCI_RFKILLED);
3949
3950 hci_dev_set_flag(hdev, HCI_SETUP);
3951 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3952
3953 if (hdev->dev_type == HCI_PRIMARY) {
3954
3955
3956
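		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */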
3957 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3958 }
3959
3960 write_lock(&hci_dev_list_lock);
3961 list_add(&hdev->list, &hci_dev_list);
3962 write_unlock(&hci_dev_list_lock);
3963
3964
3965
3966
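	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */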
3967 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3968 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3969
3970 hci_sock_dev_event(hdev, HCI_DEV_REG);
3971 hci_dev_hold(hdev);
3972
3973 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3974 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3975 error = register_pm_notifier(&hdev->suspend_notifier);
3976 if (error)
3977 goto err_wqueue;
3978 }
3979
3980 queue_work(hdev->req_workqueue, &hdev->power_on);
3981
3982 idr_init(&hdev->adv_monitors_idr);
3983
3984 return id;
3985
3986err_wqueue:
3987 destroy_workqueue(hdev->workqueue);
3988 destroy_workqueue(hdev->req_workqueue);
3989err:
3990 ida_simple_remove(&hci_index_ida, hdev->id);
3991
3992 return error;
3993}
3994EXPORT_SYMBOL(hci_register_dev);
3995
3996
3997void hci_unregister_dev(struct hci_dev *hdev)
3998{
3999 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4000
4001 hci_dev_set_flag(hdev, HCI_UNREGISTER);
4002
4003 write_lock(&hci_dev_list_lock);
4004 list_del(&hdev->list);
4005 write_unlock(&hci_dev_list_lock);
4006
4007 cancel_work_sync(&hdev->power_on);
4008
4009 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4010 hci_suspend_clear_tasks(hdev);
4011 unregister_pm_notifier(&hdev->suspend_notifier);
4012 cancel_work_sync(&hdev->suspend_prepare);
4013 }
4014
4015 hci_dev_do_close(hdev);
4016
4017 if (!test_bit(HCI_INIT, &hdev->flags) &&
4018 !hci_dev_test_flag(hdev, HCI_SETUP) &&
4019 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4020 hci_dev_lock(hdev);
4021 mgmt_index_removed(hdev);
4022 hci_dev_unlock(hdev);
4023 }
4024
4025
4026
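	/* mgmt_index_removed should take care of emptying the pending mgmt
	 * commands queue, so a non-empty queue at this point is a bug.
	 */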
4027 BUG_ON(!list_empty(&hdev->mgmt_pending));
4028
4029 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4030
4031 if (hdev->rfkill) {
4032 rfkill_unregister(hdev->rfkill);
4033 rfkill_destroy(hdev->rfkill);
4034 }
4035
4036 device_del(&hdev->dev);
4037
4038 hci_dev_put(hdev);
4039}
4040EXPORT_SYMBOL(hci_unregister_dev);
4041
4042
4043void hci_cleanup_dev(struct hci_dev *hdev)
4044{
4045 debugfs_remove_recursive(hdev->debugfs);
4046 kfree_const(hdev->hw_info);
4047 kfree_const(hdev->fw_info);
4048
4049 destroy_workqueue(hdev->workqueue);
4050 destroy_workqueue(hdev->req_workqueue);
4051
4052 hci_dev_lock(hdev);
4053 hci_bdaddr_list_clear(&hdev->reject_list);
4054 hci_bdaddr_list_clear(&hdev->accept_list);
4055 hci_uuids_clear(hdev);
4056 hci_link_keys_clear(hdev);
4057 hci_smp_ltks_clear(hdev);
4058 hci_smp_irks_clear(hdev);
4059 hci_remote_oob_data_clear(hdev);
4060 hci_adv_instances_clear(hdev);
4061 hci_adv_monitors_clear(hdev);
4062 hci_bdaddr_list_clear(&hdev->le_accept_list);
4063 hci_bdaddr_list_clear(&hdev->le_resolv_list);
4064 hci_conn_params_clear_all(hdev);
4065 hci_discovery_filter_clear(hdev);
4066 hci_blocked_keys_clear(hdev);
4067 hci_dev_unlock(hdev);
4068
4069 ida_simple_remove(&hci_index_ida, hdev->id);
4070}
4071
4072
4073int hci_suspend_dev(struct hci_dev *hdev)
4074{
4075 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4076 return 0;
4077}
4078EXPORT_SYMBOL(hci_suspend_dev);
4079
4080
4081int hci_resume_dev(struct hci_dev *hdev)
4082{
4083 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4084 return 0;
4085}
4086EXPORT_SYMBOL(hci_resume_dev);
4087
4088
4089int hci_reset_dev(struct hci_dev *hdev)
4090{
4091 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4092 struct sk_buff *skb;
4093
4094 skb = bt_skb_alloc(3, GFP_ATOMIC);
4095 if (!skb)
4096 return -ENOMEM;
4097
4098 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4099 skb_put_data(skb, hw_err, 3);
4100
4101 bt_dev_err(hdev, "Injecting HCI hardware error event");
4102
4103
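	/* Send the hardware error event to the upper stack */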
4104 return hci_recv_frame(hdev, skb);
4105}
4106EXPORT_SYMBOL(hci_reset_dev);
4107
4108
4109int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4110{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
4113 kfree_skb(skb);
4114 return -ENXIO;
4115 }
4116
4117 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4118 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4119 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4120 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4121 kfree_skb(skb);
4122 return -EINVAL;
4123 }
4124
4125
4126 bt_cb(skb)->incoming = 1;
4127
4128
4129 __net_timestamp(skb);
4130
4131 skb_queue_tail(&hdev->rx_q, skb);
4132 queue_work(hdev->workqueue, &hdev->rx_work);
4133
4134 return 0;
4135}
4136EXPORT_SYMBOL(hci_recv_frame);
4137
4138
4139int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4140{
4141
4142 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4143
4144
4145 __net_timestamp(skb);
4146
4147 skb_queue_tail(&hdev->rx_q, skb);
4148 queue_work(hdev->workqueue, &hdev->rx_work);
4149
4150 return 0;
4151}
4152EXPORT_SYMBOL(hci_recv_diag);
4153
4154void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4155{
4156 va_list vargs;
4157
4158 va_start(vargs, fmt);
4159 kfree_const(hdev->hw_info);
4160 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4161 va_end(vargs);
4162}
4163EXPORT_SYMBOL(hci_set_hw_info);
4164
4165void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4166{
4167 va_list vargs;
4168
4169 va_start(vargs, fmt);
4170 kfree_const(hdev->fw_info);
4171 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4172 va_end(vargs);
4173}
4174EXPORT_SYMBOL(hci_set_fw_info);
4175
4176
4177
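/* ---- Interface to upper protocols ---- */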
4178int hci_register_cb(struct hci_cb *cb)
4179{
4180 BT_DBG("%p name %s", cb, cb->name);
4181
4182 mutex_lock(&hci_cb_list_lock);
4183 list_add_tail(&cb->list, &hci_cb_list);
4184 mutex_unlock(&hci_cb_list_lock);
4185
4186 return 0;
4187}
4188EXPORT_SYMBOL(hci_register_cb);
4189
4190int hci_unregister_cb(struct hci_cb *cb)
4191{
4192 BT_DBG("%p name %s", cb, cb->name);
4193
4194 mutex_lock(&hci_cb_list_lock);
4195 list_del(&cb->list);
4196 mutex_unlock(&hci_cb_list_lock);
4197
4198 return 0;
4199}
4200EXPORT_SYMBOL(hci_unregister_cb);
4201
4202static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4203{
4204 int err;
4205
4206 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4207 skb->len);
4208
4209
4210 __net_timestamp(skb);
4211
4212
4213 hci_send_to_monitor(hdev, skb);
4214
4215 if (atomic_read(&hdev->promisc)) {
4216
4217 hci_send_to_sock(hdev, skb);
4218 }
4219
4220
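	/* Get rid of skb owner, prior to sending to the driver. */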
4221 skb_orphan(skb);
4222
4223 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4224 kfree_skb(skb);
4225 return;
4226 }
4227
4228 err = hdev->send(hdev, skb);
4229 if (err < 0) {
4230 bt_dev_err(hdev, "sending frame failed (%d)", err);
4231 kfree_skb(skb);
4232 }
4233}
4234
4235
4236int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4237 const void *param)
4238{
4239 struct sk_buff *skb;
4240
4241 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4242
4243 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4244 if (!skb) {
4245 bt_dev_err(hdev, "no memory for command");
4246 return -ENOMEM;
4247 }
4248
4249
4250
4251
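	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */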
4252 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4253
4254 skb_queue_tail(&hdev->cmd_q, skb);
4255 queue_work(hdev->workqueue, &hdev->cmd_work);
4256
4257 return 0;
4258}
4259
4260int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4261 const void *param)
4262{
4263 struct sk_buff *skb;
4264
4265 if (hci_opcode_ogf(opcode) != 0x3f) {
4266
4267
4268
4269
4270
4271
4272
4273
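		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */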
4274 bt_dev_err(hdev, "unresponded command not supported");
4275 return -EINVAL;
4276 }
4277
4278 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4279 if (!skb) {
4280 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4281 opcode);
4282 return -ENOMEM;
4283 }
4284
4285 hci_send_frame(hdev, skb);
4286
4287 return 0;
4288}
4289EXPORT_SYMBOL(__hci_cmd_send);
4290
4291
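/* Get data from the previously sent command */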
4292void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4293{
4294 struct hci_command_hdr *hdr;
4295
4296 if (!hdev->sent_cmd)
4297 return NULL;
4298
4299 hdr = (void *) hdev->sent_cmd->data;
4300
4301 if (hdr->opcode != cpu_to_le16(opcode))
4302 return NULL;
4303
4304 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4305
4306 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4307}
4308
4309
4310struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4311 const void *param, u32 timeout)
4312{
4313 struct sk_buff *skb;
4314
4315 if (!test_bit(HCI_UP, &hdev->flags))
4316 return ERR_PTR(-ENETDOWN);
4317
4318 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4319
4320 hci_req_sync_lock(hdev);
4321 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4322 hci_req_sync_unlock(hdev);
4323
4324 return skb;
4325}
4326EXPORT_SYMBOL(hci_cmd_sync);
4327
4328
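/* Send ACL data */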
4329static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4330{
4331 struct hci_acl_hdr *hdr;
4332 int len = skb->len;
4333
4334 skb_push(skb, HCI_ACL_HDR_SIZE);
4335 skb_reset_transport_header(skb);
4336 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4337 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4338 hdr->dlen = cpu_to_le16(len);
4339}
4340
4341static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4342 struct sk_buff *skb, __u16 flags)
4343{
4344 struct hci_conn *conn = chan->conn;
4345 struct hci_dev *hdev = conn->hdev;
4346 struct sk_buff *list;
4347
4348 skb->len = skb_headlen(skb);
4349 skb->data_len = 0;
4350
4351 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4352
4353 switch (hdev->dev_type) {
4354 case HCI_PRIMARY:
4355 hci_add_acl_hdr(skb, conn->handle, flags);
4356 break;
4357 case HCI_AMP:
4358 hci_add_acl_hdr(skb, chan->handle, flags);
4359 break;
4360 default:
4361 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4362 return;
4363 }
4364
4365 list = skb_shinfo(skb)->frag_list;
4366 if (!list) {
4367
4368 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4369
4370 skb_queue_tail(queue, skb);
4371 } else {
4372
4373 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4374
4375 skb_shinfo(skb)->frag_list = NULL;
4376
4377
4378
4379
4380
4381
4382 spin_lock_bh(&queue->lock);
4383
4384 __skb_queue_tail(queue, skb);
4385
4386 flags &= ~ACL_START;
4387 flags |= ACL_CONT;
4388 do {
4389 skb = list; list = list->next;
4390
4391 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4392 hci_add_acl_hdr(skb, conn->handle, flags);
4393
4394 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4395
4396 __skb_queue_tail(queue, skb);
4397 } while (list);
4398
4399 spin_unlock_bh(&queue->lock);
4400 }
4401}
4402
4403void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4404{
4405 struct hci_dev *hdev = chan->conn->hdev;
4406
4407 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4408
4409 hci_queue_acl(chan, &chan->data_q, skb, flags);
4410
4411 queue_work(hdev->workqueue, &hdev->tx_work);
4412}
4413
4414
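/* Send SCO data */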
4415void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4416{
4417 struct hci_dev *hdev = conn->hdev;
4418 struct hci_sco_hdr hdr;
4419
4420 BT_DBG("%s len %d", hdev->name, skb->len);
4421
4422 hdr.handle = cpu_to_le16(conn->handle);
4423 hdr.dlen = skb->len;
4424
4425 skb_push(skb, HCI_SCO_HDR_SIZE);
4426 skb_reset_transport_header(skb);
4427 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4428
4429 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4430
4431 skb_queue_tail(&conn->data_q, skb);
4432 queue_work(hdev->workqueue, &hdev->tx_work);
4433}
4434
4435
4436
4437
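/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */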
4438static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4439 int *quote)
4440{
4441 struct hci_conn_hash *h = &hdev->conn_hash;
4442 struct hci_conn *conn = NULL, *c;
4443 unsigned int num = 0, min = ~0;
4444
4445
4446
4447
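	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */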
4448 rcu_read_lock();
4449
4450 list_for_each_entry_rcu(c, &h->list, list) {
4451 if (c->type != type || skb_queue_empty(&c->data_q))
4452 continue;
4453
4454 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4455 continue;
4456
4457 num++;
4458
4459 if (c->sent < min) {
4460 min = c->sent;
4461 conn = c;
4462 }
4463
4464 if (hci_conn_num(hdev, type) == num)
4465 break;
4466 }
4467
4468 rcu_read_unlock();
4469
4470 if (conn) {
4471 int cnt, q;
4472
4473 switch (conn->type) {
4474 case ACL_LINK:
4475 cnt = hdev->acl_cnt;
4476 break;
4477 case SCO_LINK:
4478 case ESCO_LINK:
4479 cnt = hdev->sco_cnt;
4480 break;
4481 case LE_LINK:
4482 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4483 break;
4484 default:
4485 cnt = 0;
4486 bt_dev_err(hdev, "unknown link type %d", conn->type);
4487 }
4488
4489 q = cnt / num;
4490 *quote = q ? q : 1;
4491 } else
4492 *quote = 0;
4493
4494 BT_DBG("conn %p quote %d", conn, *quote);
4495 return conn;
4496}
4497
4498static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4499{
4500 struct hci_conn_hash *h = &hdev->conn_hash;
4501 struct hci_conn *c;
4502
4503 bt_dev_err(hdev, "link tx timeout");
4504
4505 rcu_read_lock();
4506
4507
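	/* Kill stalled connections */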
4508 list_for_each_entry_rcu(c, &h->list, list) {
4509 if (c->type == type && c->sent) {
4510 bt_dev_err(hdev, "killing stalled connection %pMR",
4511 &c->dst);
4512 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4513 }
4514 }
4515
4516 rcu_read_unlock();
4517}
4518
4519static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4520 int *quote)
4521{
4522 struct hci_conn_hash *h = &hdev->conn_hash;
4523 struct hci_chan *chan = NULL;
4524 unsigned int num = 0, min = ~0, cur_prio = 0;
4525 struct hci_conn *conn;
4526 int cnt, q, conn_num = 0;
4527
4528 BT_DBG("%s", hdev->name);
4529
4530 rcu_read_lock();
4531
4532 list_for_each_entry_rcu(conn, &h->list, list) {
4533 struct hci_chan *tmp;
4534
4535 if (conn->type != type)
4536 continue;
4537
4538 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4539 continue;
4540
4541 conn_num++;
4542
4543 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4544 struct sk_buff *skb;
4545
4546 if (skb_queue_empty(&tmp->data_q))
4547 continue;
4548
4549 skb = skb_peek(&tmp->data_q);
4550 if (skb->priority < cur_prio)
4551 continue;
4552
4553 if (skb->priority > cur_prio) {
4554 num = 0;
4555 min = ~0;
4556 cur_prio = skb->priority;
4557 }
4558
4559 num++;
4560
4561 if (conn->sent < min) {
4562 min = conn->sent;
4563 chan = tmp;
4564 }
4565 }
4566
4567 if (hci_conn_num(hdev, type) == conn_num)
4568 break;
4569 }
4570
4571 rcu_read_unlock();
4572
4573 if (!chan)
4574 return NULL;
4575
4576 switch (chan->conn->type) {
4577 case ACL_LINK:
4578 cnt = hdev->acl_cnt;
4579 break;
4580 case AMP_LINK:
4581 cnt = hdev->block_cnt;
4582 break;
4583 case SCO_LINK:
4584 case ESCO_LINK:
4585 cnt = hdev->sco_cnt;
4586 break;
4587 case LE_LINK:
4588 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4589 break;
4590 default:
4591 cnt = 0;
4592 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4593 }
4594
4595 q = cnt / num;
4596 *quote = q ? q : 1;
4597 BT_DBG("chan %p quote %d", chan, *quote);
4598 return chan;
4599}
4600
4601static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4602{
4603 struct hci_conn_hash *h = &hdev->conn_hash;
4604 struct hci_conn *conn;
4605 int num = 0;
4606
4607 BT_DBG("%s", hdev->name);
4608
4609 rcu_read_lock();
4610
4611 list_for_each_entry_rcu(conn, &h->list, list) {
4612 struct hci_chan *chan;
4613
4614 if (conn->type != type)
4615 continue;
4616
4617 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4618 continue;
4619
4620 num++;
4621
4622 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4623 struct sk_buff *skb;
4624
4625 if (chan->sent) {
4626 chan->sent = 0;
4627 continue;
4628 }
4629
4630 if (skb_queue_empty(&chan->data_q))
4631 continue;
4632
4633 skb = skb_peek(&chan->data_q);
4634 if (skb->priority >= HCI_PRIO_MAX - 1)
4635 continue;
4636
4637 skb->priority = HCI_PRIO_MAX - 1;
4638
4639 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4640 skb->priority);
4641 }
4642
4643 if (hci_conn_num(hdev, type) == num)
4644 break;
4645 }
4646
4647 rcu_read_unlock();
4648
4649}
4650
4651static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4652{
4653
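	/* Calculate count of blocks used by this packet */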
4654 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4655}
4656
4657static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4658{
4659 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4660
4661
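		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */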
4662 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4663 HCI_ACL_TX_TIMEOUT))
4664 hci_link_tx_to(hdev, ACL_LINK);
4665 }
4666}
4667
4668
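/* Schedule SCO */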
4669static void hci_sched_sco(struct hci_dev *hdev)
4670{
4671 struct hci_conn *conn;
4672 struct sk_buff *skb;
4673 int quote;
4674
4675 BT_DBG("%s", hdev->name);
4676
4677 if (!hci_conn_num(hdev, SCO_LINK))
4678 return;
4679
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4681 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4682 BT_DBG("skb %p len %d", skb, skb->len);
4683 hci_send_frame(hdev, skb);
4684
4685 conn->sent++;
4686 if (conn->sent == ~0)
4687 conn->sent = 0;
4688 }
4689 }
4690}
4691
4692static void hci_sched_esco(struct hci_dev *hdev)
4693{
4694 struct hci_conn *conn;
4695 struct sk_buff *skb;
4696 int quote;
4697
4698 BT_DBG("%s", hdev->name);
4699
4700 if (!hci_conn_num(hdev, ESCO_LINK))
4701 return;
4702
4703 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4704 "e))) {
4705 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4706 BT_DBG("skb %p len %d", skb, skb->len);
4707 hci_send_frame(hdev, skb);
4708
4709 conn->sent++;
4710 if (conn->sent == ~0)
4711 conn->sent = 0;
4712 }
4713 }
4714}
4715
4716static void hci_sched_acl_pkt(struct hci_dev *hdev)
4717{
4718 unsigned int cnt = hdev->acl_cnt;
4719 struct hci_chan *chan;
4720 struct sk_buff *skb;
4721 int quote;
4722
4723 __check_timeout(hdev, cnt);
4724
4725 while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4727 u32 priority = (skb_peek(&chan->data_q))->priority;
4728 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4729 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4730 skb->len, skb->priority);
4731
4732
4733 if (skb->priority < priority)
4734 break;
4735
4736 skb = skb_dequeue(&chan->data_q);
4737
4738 hci_conn_enter_active_mode(chan->conn,
4739 bt_cb(skb)->force_active);
4740
4741 hci_send_frame(hdev, skb);
4742 hdev->acl_last_tx = jiffies;
4743
4744 hdev->acl_cnt--;
4745 chan->sent++;
4746 chan->conn->sent++;
4747
4748
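			/* Send pending SCO packets right away */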
4749 hci_sched_sco(hdev);
4750 hci_sched_esco(hdev);
4751 }
4752 }
4753
4754 if (cnt != hdev->acl_cnt)
4755 hci_prio_recalculate(hdev, ACL_LINK);
4756}
4757
4758static void hci_sched_acl_blk(struct hci_dev *hdev)
4759{
4760 unsigned int cnt = hdev->block_cnt;
4761 struct hci_chan *chan;
4762 struct sk_buff *skb;
4763 int quote;
4764 u8 type;
4765
4766 __check_timeout(hdev, cnt);
4767
4768 BT_DBG("%s", hdev->name);
4769
4770 if (hdev->dev_type == HCI_AMP)
4771 type = AMP_LINK;
4772 else
4773 type = ACL_LINK;
4774
4775 while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
4777 u32 priority = (skb_peek(&chan->data_q))->priority;
4778 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4779 int blocks;
4780
4781 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4782 skb->len, skb->priority);
4783
4784
4785 if (skb->priority < priority)
4786 break;
4787
4788 skb = skb_dequeue(&chan->data_q);
4789
4790 blocks = __get_blocks(hdev, skb);
4791 if (blocks > hdev->block_cnt)
4792 return;
4793
4794 hci_conn_enter_active_mode(chan->conn,
4795 bt_cb(skb)->force_active);
4796
4797 hci_send_frame(hdev, skb);
4798 hdev->acl_last_tx = jiffies;
4799
4800 hdev->block_cnt -= blocks;
4801 quote -= blocks;
4802
4803 chan->sent += blocks;
4804 chan->conn->sent += blocks;
4805 }
4806 }
4807
4808 if (cnt != hdev->block_cnt)
4809 hci_prio_recalculate(hdev, type);
4810}
4811
4812static void hci_sched_acl(struct hci_dev *hdev)
4813{
4814 BT_DBG("%s", hdev->name);
4815
4816
4817 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4818 return;
4819
4820
4821 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4822 return;
4823
4824 switch (hdev->flow_ctl_mode) {
4825 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4826 hci_sched_acl_pkt(hdev);
4827 break;
4828
4829 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4830 hci_sched_acl_blk(hdev);
4831 break;
4832 }
4833}
4834
4835static void hci_sched_le(struct hci_dev *hdev)
4836{
4837 struct hci_chan *chan;
4838 struct sk_buff *skb;
4839 int quote, cnt, tmp;
4840
4841 BT_DBG("%s", hdev->name);
4842
4843 if (!hci_conn_num(hdev, LE_LINK))
4844 return;
4845
4846 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4847
4848 __check_timeout(hdev, cnt);
4849
4850 tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4852 u32 priority = (skb_peek(&chan->data_q))->priority;
4853 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4854 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4855 skb->len, skb->priority);
4856
4857
4858 if (skb->priority < priority)
4859 break;
4860
4861 skb = skb_dequeue(&chan->data_q);
4862
4863 hci_send_frame(hdev, skb);
4864 hdev->le_last_tx = jiffies;
4865
4866 cnt--;
4867 chan->sent++;
4868 chan->conn->sent++;
4869
4870
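			/* Send pending SCO packets right away */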
4871 hci_sched_sco(hdev);
4872 hci_sched_esco(hdev);
4873 }
4874 }
4875
4876 if (hdev->le_pkts)
4877 hdev->le_cnt = cnt;
4878 else
4879 hdev->acl_cnt = cnt;
4880
4881 if (cnt != tmp)
4882 hci_prio_recalculate(hdev, LE_LINK);
4883}
4884
4885static void hci_tx_work(struct work_struct *work)
4886{
4887 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4888 struct sk_buff *skb;
4889
4890 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4891 hdev->sco_cnt, hdev->le_cnt);
4892
4893 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4894
4895 hci_sched_sco(hdev);
4896 hci_sched_esco(hdev);
4897 hci_sched_acl(hdev);
4898 hci_sched_le(hdev);
4899 }
4900
4901
4902 while ((skb = skb_dequeue(&hdev->raw_q)))
4903 hci_send_frame(hdev, skb);
4904}
4905
4906
4907
4908
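/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */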
4909static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4910{
4911 struct hci_acl_hdr *hdr = (void *) skb->data;
4912 struct hci_conn *conn;
4913 __u16 handle, flags;
4914
4915 skb_pull(skb, HCI_ACL_HDR_SIZE);
4916
4917 handle = __le16_to_cpu(hdr->handle);
4918 flags = hci_flags(handle);
4919 handle = hci_handle(handle);
4920
4921 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4922 handle, flags);
4923
4924 hdev->stat.acl_rx++;
4925
4926 hci_dev_lock(hdev);
4927 conn = hci_conn_hash_lookup_handle(hdev, handle);
4928 hci_dev_unlock(hdev);
4929
4930 if (conn) {
4931 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4932
4933
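		/* Send to upper protocol */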
4934 l2cap_recv_acldata(conn, skb, flags);
4935 return;
4936 } else {
4937 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4938 handle);
4939 }
4940
4941 kfree_skb(skb);
4942}
4943
4944
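/* SCO data packet */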
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

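/* A request is complete when the command queue is empty or when the
 * next queued command marks the start of a new request.
 */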
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

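/* Requeue a clone of the last sent command (unless it was
 * HCI_OP_RESET) and kick the command worker. Used when an unexpected
 * reset completion during init means the real response to the pending
 * command will never arrive.
 */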
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

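/* Called from event processing when a command has been answered by the
 * controller: hand back the request's completion callbacks, taken
 * either from hdev->sent_cmd or from the queued commands that belong
 * to the same request.
 */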
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

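/* RX work callback: drains hdev->rx_q, mirrors each frame to the
 * monitor and raw sockets, then dispatches it by packet type.
 */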
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device. While
		 * the device is in HCI_INIT we still need to process
		 * the data packets so the driver can complete its
		 * setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

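/* Command work callback: transmits the next queued command when the
 * controller has a free command credit (hdev->cmd_cnt) and arms the
 * command timeout, except while a reset is in flight.
 */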
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
