1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "hci_request.h"
34#include "hci_debugfs.h"
35#include "a2mp.h"
36#include "amp.h"
37#include "smp.h"
38#include "msft.h"
39
40#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
42
43
44
45static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
46 u8 *new_status)
47{
48 __u8 status = *((__u8 *) skb->data);
49
50 BT_DBG("%s status 0x%2.2x", hdev->name, status);
51
52
53
54
55
56
57
58
59 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
60 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
61 status = 0x00;
62 }
63
64 *new_status = status;
65
66 if (status)
67 return;
68
69 clear_bit(HCI_INQUIRY, &hdev->flags);
70 smp_mb__after_atomic();
71 wake_up_bit(&hdev->flags, HCI_INQUIRY);
72
73 hci_dev_lock(hdev);
74
75
76
77 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
78 hdev->le_scan_type != LE_SCAN_ACTIVE)
79 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
80 hci_dev_unlock(hdev);
81
82 hci_conn_check_pending(hdev);
83}
84
85static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86{
87 __u8 status = *((__u8 *) skb->data);
88
89 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90
91 if (status)
92 return;
93
94 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
95}
96
97static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98{
99 __u8 status = *((__u8 *) skb->data);
100
101 BT_DBG("%s status 0x%2.2x", hdev->name, status);
102
103 if (status)
104 return;
105
106 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107
108 hci_conn_check_pending(hdev);
109}
110
111static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
112 struct sk_buff *skb)
113{
114 BT_DBG("%s", hdev->name);
115}
116
117static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118{
119 struct hci_rp_role_discovery *rp = (void *) skb->data;
120 struct hci_conn *conn;
121
122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123
124 if (rp->status)
125 return;
126
127 hci_dev_lock(hdev);
128
129 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 if (conn)
131 conn->role = rp->role;
132
133 hci_dev_unlock(hdev);
134}
135
136static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137{
138 struct hci_rp_read_link_policy *rp = (void *) skb->data;
139 struct hci_conn *conn;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 hci_dev_lock(hdev);
147
148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 if (conn)
150 conn->link_policy = __le16_to_cpu(rp->policy);
151
152 hci_dev_unlock(hdev);
153}
154
155static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156{
157 struct hci_rp_write_link_policy *rp = (void *) skb->data;
158 struct hci_conn *conn;
159 void *sent;
160
161 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162
163 if (rp->status)
164 return;
165
166 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
167 if (!sent)
168 return;
169
170 hci_dev_lock(hdev);
171
172 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 if (conn)
174 conn->link_policy = get_unaligned_le16(sent + 2);
175
176 hci_dev_unlock(hdev);
177}
178
179static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
180 struct sk_buff *skb)
181{
182 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183
184 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
185
186 if (rp->status)
187 return;
188
189 hdev->link_policy = __le16_to_cpu(rp->policy);
190}
191
192static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
193 struct sk_buff *skb)
194{
195 __u8 status = *((__u8 *) skb->data);
196 void *sent;
197
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
199
200 if (status)
201 return;
202
203 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
204 if (!sent)
205 return;
206
207 hdev->link_policy = get_unaligned_le16(sent);
208}
209
210static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211{
212 __u8 status = *((__u8 *) skb->data);
213
214 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215
216 clear_bit(HCI_RESET, &hdev->flags);
217
218 if (status)
219 return;
220
221
222 hci_dev_clear_volatile_flags(hdev);
223
224 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225
226 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
227 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228
229 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
230 hdev->adv_data_len = 0;
231
232 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
233 hdev->scan_rsp_data_len = 0;
234
235 hdev->le_scan_type = LE_SCAN_PASSIVE;
236
237 hdev->ssp_debug_mode = 0;
238
239 hci_bdaddr_list_clear(&hdev->le_accept_list);
240 hci_bdaddr_list_clear(&hdev->le_resolv_list);
241}
242
243static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
244 struct sk_buff *skb)
245{
246 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
247 struct hci_cp_read_stored_link_key *sent;
248
249 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
250
251 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
252 if (!sent)
253 return;
254
255 if (!rp->status && sent->read_all == 0x01) {
256 hdev->stored_max_keys = rp->max_keys;
257 hdev->stored_num_keys = rp->num_keys;
258 }
259}
260
261static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
262 struct sk_buff *skb)
263{
264 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
265
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
267
268 if (rp->status)
269 return;
270
271 if (rp->num_keys <= hdev->stored_num_keys)
272 hdev->stored_num_keys -= rp->num_keys;
273 else
274 hdev->stored_num_keys = 0;
275}
276
277static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278{
279 __u8 status = *((__u8 *) skb->data);
280 void *sent;
281
282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
285 if (!sent)
286 return;
287
288 hci_dev_lock(hdev);
289
290 if (hci_dev_test_flag(hdev, HCI_MGMT))
291 mgmt_set_local_name_complete(hdev, sent, status);
292 else if (!status)
293 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
294
295 hci_dev_unlock(hdev);
296}
297
298static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299{
300 struct hci_rp_read_local_name *rp = (void *) skb->data;
301
302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
303
304 if (rp->status)
305 return;
306
307 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
308 hci_dev_test_flag(hdev, HCI_CONFIG))
309 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
310}
311
312static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
313{
314 __u8 status = *((__u8 *) skb->data);
315 void *sent;
316
317 BT_DBG("%s status 0x%2.2x", hdev->name, status);
318
319 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
320 if (!sent)
321 return;
322
323 hci_dev_lock(hdev);
324
325 if (!status) {
326 __u8 param = *((__u8 *) sent);
327
328 if (param == AUTH_ENABLED)
329 set_bit(HCI_AUTH, &hdev->flags);
330 else
331 clear_bit(HCI_AUTH, &hdev->flags);
332 }
333
334 if (hci_dev_test_flag(hdev, HCI_MGMT))
335 mgmt_auth_enable_complete(hdev, status);
336
337 hci_dev_unlock(hdev);
338}
339
340static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341{
342 __u8 status = *((__u8 *) skb->data);
343 __u8 param;
344 void *sent;
345
346 BT_DBG("%s status 0x%2.2x", hdev->name, status);
347
348 if (status)
349 return;
350
351 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
352 if (!sent)
353 return;
354
355 param = *((__u8 *) sent);
356
357 if (param)
358 set_bit(HCI_ENCRYPT, &hdev->flags);
359 else
360 clear_bit(HCI_ENCRYPT, &hdev->flags);
361}
362
363static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
364{
365 __u8 status = *((__u8 *) skb->data);
366 __u8 param;
367 void *sent;
368
369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
372 if (!sent)
373 return;
374
375 param = *((__u8 *) sent);
376
377 hci_dev_lock(hdev);
378
379 if (status) {
380 hdev->discov_timeout = 0;
381 goto done;
382 }
383
384 if (param & SCAN_INQUIRY)
385 set_bit(HCI_ISCAN, &hdev->flags);
386 else
387 clear_bit(HCI_ISCAN, &hdev->flags);
388
389 if (param & SCAN_PAGE)
390 set_bit(HCI_PSCAN, &hdev->flags);
391 else
392 clear_bit(HCI_PSCAN, &hdev->flags);
393
394done:
395 hci_dev_unlock(hdev);
396}
397
398static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
399{
400 __u8 status = *((__u8 *)skb->data);
401 struct hci_cp_set_event_filter *cp;
402 void *sent;
403
404 BT_DBG("%s status 0x%2.2x", hdev->name, status);
405
406 if (status)
407 return;
408
409 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
410 if (!sent)
411 return;
412
413 cp = (struct hci_cp_set_event_filter *)sent;
414
415 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
416 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
417 else
418 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
419}
420
421static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
422{
423 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
424
425 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426
427 if (rp->status)
428 return;
429
430 memcpy(hdev->dev_class, rp->dev_class, 3);
431
432 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
433 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
434}
435
436static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
437{
438 __u8 status = *((__u8 *) skb->data);
439 void *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
444 if (!sent)
445 return;
446
447 hci_dev_lock(hdev);
448
449 if (status == 0)
450 memcpy(hdev->dev_class, sent, 3);
451
452 if (hci_dev_test_flag(hdev, HCI_MGMT))
453 mgmt_set_class_of_dev_complete(hdev, sent, status);
454
455 hci_dev_unlock(hdev);
456}
457
458static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
459{
460 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
461 __u16 setting;
462
463 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
464
465 if (rp->status)
466 return;
467
468 setting = __le16_to_cpu(rp->voice_setting);
469
470 if (hdev->voice_setting == setting)
471 return;
472
473 hdev->voice_setting = setting;
474
475 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
476
477 if (hdev->notify)
478 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
479}
480
481static void hci_cc_write_voice_setting(struct hci_dev *hdev,
482 struct sk_buff *skb)
483{
484 __u8 status = *((__u8 *) skb->data);
485 __u16 setting;
486 void *sent;
487
488 BT_DBG("%s status 0x%2.2x", hdev->name, status);
489
490 if (status)
491 return;
492
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
494 if (!sent)
495 return;
496
497 setting = get_unaligned_le16(sent);
498
499 if (hdev->voice_setting == setting)
500 return;
501
502 hdev->voice_setting = setting;
503
504 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
505
506 if (hdev->notify)
507 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
508}
509
510static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
511 struct sk_buff *skb)
512{
513 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
514
515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517 if (rp->status)
518 return;
519
520 hdev->num_iac = rp->num_iac;
521
522 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
523}
524
525static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
526{
527 __u8 status = *((__u8 *) skb->data);
528 struct hci_cp_write_ssp_mode *sent;
529
530 BT_DBG("%s status 0x%2.2x", hdev->name, status);
531
532 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
533 if (!sent)
534 return;
535
536 hci_dev_lock(hdev);
537
538 if (!status) {
539 if (sent->mode)
540 hdev->features[1][0] |= LMP_HOST_SSP;
541 else
542 hdev->features[1][0] &= ~LMP_HOST_SSP;
543 }
544
545 if (hci_dev_test_flag(hdev, HCI_MGMT))
546 mgmt_ssp_enable_complete(hdev, sent->mode, status);
547 else if (!status) {
548 if (sent->mode)
549 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
550 else
551 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
552 }
553
554 hci_dev_unlock(hdev);
555}
556
557static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
558{
559 u8 status = *((u8 *) skb->data);
560 struct hci_cp_write_sc_support *sent;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, status);
563
564 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
565 if (!sent)
566 return;
567
568 hci_dev_lock(hdev);
569
570 if (!status) {
571 if (sent->support)
572 hdev->features[1][0] |= LMP_HOST_SC;
573 else
574 hdev->features[1][0] &= ~LMP_HOST_SC;
575 }
576
577 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
578 if (sent->support)
579 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
580 else
581 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
582 }
583
584 hci_dev_unlock(hdev);
585}
586
587static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
588{
589 struct hci_rp_read_local_version *rp = (void *) skb->data;
590
591 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592
593 if (rp->status)
594 return;
595
596 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
597 hci_dev_test_flag(hdev, HCI_CONFIG)) {
598 hdev->hci_ver = rp->hci_ver;
599 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
600 hdev->lmp_ver = rp->lmp_ver;
601 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
602 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
603 }
604}
605
606static void hci_cc_read_local_commands(struct hci_dev *hdev,
607 struct sk_buff *skb)
608{
609 struct hci_rp_read_local_commands *rp = (void *) skb->data;
610
611 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
612
613 if (rp->status)
614 return;
615
616 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
617 hci_dev_test_flag(hdev, HCI_CONFIG))
618 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
619}
620
621static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
622 struct sk_buff *skb)
623{
624 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
625 struct hci_conn *conn;
626
627 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
628
629 if (rp->status)
630 return;
631
632 hci_dev_lock(hdev);
633
634 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
635 if (conn)
636 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
637
638 hci_dev_unlock(hdev);
639}
640
641static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
642 struct sk_buff *skb)
643{
644 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
645 struct hci_conn *conn;
646 void *sent;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
654 if (!sent)
655 return;
656
657 hci_dev_lock(hdev);
658
659 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
660 if (conn)
661 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
662
663 hci_dev_unlock(hdev);
664}
665
666static void hci_cc_read_local_features(struct hci_dev *hdev,
667 struct sk_buff *skb)
668{
669 struct hci_rp_read_local_features *rp = (void *) skb->data;
670
671 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
672
673 if (rp->status)
674 return;
675
676 memcpy(hdev->features, rp->features, 8);
677
678
679
680
681 if (hdev->features[0][0] & LMP_3SLOT)
682 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
683
684 if (hdev->features[0][0] & LMP_5SLOT)
685 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
686
687 if (hdev->features[0][1] & LMP_HV2) {
688 hdev->pkt_type |= (HCI_HV2);
689 hdev->esco_type |= (ESCO_HV2);
690 }
691
692 if (hdev->features[0][1] & LMP_HV3) {
693 hdev->pkt_type |= (HCI_HV3);
694 hdev->esco_type |= (ESCO_HV3);
695 }
696
697 if (lmp_esco_capable(hdev))
698 hdev->esco_type |= (ESCO_EV3);
699
700 if (hdev->features[0][4] & LMP_EV4)
701 hdev->esco_type |= (ESCO_EV4);
702
703 if (hdev->features[0][4] & LMP_EV5)
704 hdev->esco_type |= (ESCO_EV5);
705
706 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
707 hdev->esco_type |= (ESCO_2EV3);
708
709 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
710 hdev->esco_type |= (ESCO_3EV3);
711
712 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
713 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
714}
715
716static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
717 struct sk_buff *skb)
718{
719 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
720
721 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
722
723 if (rp->status)
724 return;
725
726 if (hdev->max_page < rp->max_page)
727 hdev->max_page = rp->max_page;
728
729 if (rp->page < HCI_MAX_PAGES)
730 memcpy(hdev->features[rp->page], rp->features, 8);
731}
732
733static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
734 struct sk_buff *skb)
735{
736 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 hdev->flow_ctl_mode = rp->mode;
744}
745
746static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
747{
748 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (rp->status)
753 return;
754
755 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
756 hdev->sco_mtu = rp->sco_mtu;
757 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
758 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
759
760 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
761 hdev->sco_mtu = 64;
762 hdev->sco_pkts = 8;
763 }
764
765 hdev->acl_cnt = hdev->acl_pkts;
766 hdev->sco_cnt = hdev->sco_pkts;
767
768 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
769 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
770}
771
772static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
773{
774 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
775
776 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
777
778 if (rp->status)
779 return;
780
781 if (test_bit(HCI_INIT, &hdev->flags))
782 bacpy(&hdev->bdaddr, &rp->bdaddr);
783
784 if (hci_dev_test_flag(hdev, HCI_SETUP))
785 bacpy(&hdev->setup_addr, &rp->bdaddr);
786}
787
788static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
789 struct sk_buff *skb)
790{
791 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
792
793 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794
795 if (rp->status)
796 return;
797
798 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
799 hci_dev_test_flag(hdev, HCI_CONFIG)) {
800 hdev->pairing_opts = rp->pairing_opts;
801 hdev->max_enc_key_size = rp->max_key_size;
802 }
803}
804
805static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
806 struct sk_buff *skb)
807{
808 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
809
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811
812 if (rp->status)
813 return;
814
815 if (test_bit(HCI_INIT, &hdev->flags)) {
816 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
817 hdev->page_scan_window = __le16_to_cpu(rp->window);
818 }
819}
820
821static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
822 struct sk_buff *skb)
823{
824 u8 status = *((u8 *) skb->data);
825 struct hci_cp_write_page_scan_activity *sent;
826
827 BT_DBG("%s status 0x%2.2x", hdev->name, status);
828
829 if (status)
830 return;
831
832 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
833 if (!sent)
834 return;
835
836 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
837 hdev->page_scan_window = __le16_to_cpu(sent->window);
838}
839
840static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
841 struct sk_buff *skb)
842{
843 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 if (test_bit(HCI_INIT, &hdev->flags))
851 hdev->page_scan_type = rp->type;
852}
853
854static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
855 struct sk_buff *skb)
856{
857 u8 status = *((u8 *) skb->data);
858 u8 *type;
859
860 BT_DBG("%s status 0x%2.2x", hdev->name, status);
861
862 if (status)
863 return;
864
865 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
866 if (type)
867 hdev->page_scan_type = *type;
868}
869
870static void hci_cc_read_data_block_size(struct hci_dev *hdev,
871 struct sk_buff *skb)
872{
873 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
874
875 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
876
877 if (rp->status)
878 return;
879
880 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
881 hdev->block_len = __le16_to_cpu(rp->block_len);
882 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
883
884 hdev->block_cnt = hdev->num_blocks;
885
886 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
887 hdev->block_cnt, hdev->block_len);
888}
889
890static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
891{
892 struct hci_rp_read_clock *rp = (void *) skb->data;
893 struct hci_cp_read_clock *cp;
894 struct hci_conn *conn;
895
896 BT_DBG("%s", hdev->name);
897
898 if (skb->len < sizeof(*rp))
899 return;
900
901 if (rp->status)
902 return;
903
904 hci_dev_lock(hdev);
905
906 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
907 if (!cp)
908 goto unlock;
909
910 if (cp->which == 0x00) {
911 hdev->clock = le32_to_cpu(rp->clock);
912 goto unlock;
913 }
914
915 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
916 if (conn) {
917 conn->clock = le32_to_cpu(rp->clock);
918 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
919 }
920
921unlock:
922 hci_dev_unlock(hdev);
923}
924
925static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
926 struct sk_buff *skb)
927{
928 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
929
930 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
931
932 if (rp->status)
933 return;
934
935 hdev->amp_status = rp->amp_status;
936 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
937 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
938 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
939 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
940 hdev->amp_type = rp->amp_type;
941 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
942 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
943 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
944 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
945}
946
947static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
948 struct sk_buff *skb)
949{
950 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
951
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
953
954 if (rp->status)
955 return;
956
957 hdev->inq_tx_power = rp->tx_power;
958}
959
960static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
961 struct sk_buff *skb)
962{
963 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
964
965 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
966
967 if (rp->status)
968 return;
969
970 hdev->err_data_reporting = rp->err_data_reporting;
971}
972
973static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
974 struct sk_buff *skb)
975{
976 __u8 status = *((__u8 *)skb->data);
977 struct hci_cp_write_def_err_data_reporting *cp;
978
979 BT_DBG("%s status 0x%2.2x", hdev->name, status);
980
981 if (status)
982 return;
983
984 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
985 if (!cp)
986 return;
987
988 hdev->err_data_reporting = cp->err_data_reporting;
989}
990
991static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
992{
993 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
994 struct hci_cp_pin_code_reply *cp;
995 struct hci_conn *conn;
996
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
998
999 hci_dev_lock(hdev);
1000
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1003
1004 if (rp->status)
1005 goto unlock;
1006
1007 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1008 if (!cp)
1009 goto unlock;
1010
1011 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1012 if (conn)
1013 conn->pin_length = cp->pin_len;
1014
1015unlock:
1016 hci_dev_unlock(hdev);
1017}
1018
1019static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1020{
1021 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1022
1023 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1024
1025 hci_dev_lock(hdev);
1026
1027 if (hci_dev_test_flag(hdev, HCI_MGMT))
1028 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1029 rp->status);
1030
1031 hci_dev_unlock(hdev);
1032}
1033
1034static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1035 struct sk_buff *skb)
1036{
1037 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1038
1039 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1040
1041 if (rp->status)
1042 return;
1043
1044 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1045 hdev->le_pkts = rp->le_max_pkt;
1046
1047 hdev->le_cnt = hdev->le_pkts;
1048
1049 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1050}
1051
1052static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1053 struct sk_buff *skb)
1054{
1055 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1056
1057 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1058
1059 if (rp->status)
1060 return;
1061
1062 memcpy(hdev->le_features, rp->features, 8);
1063}
1064
1065static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1066 struct sk_buff *skb)
1067{
1068 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1069
1070 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1071
1072 if (rp->status)
1073 return;
1074
1075 hdev->adv_tx_power = rp->tx_power;
1076}
1077
1078static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1079{
1080 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1081
1082 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1083
1084 hci_dev_lock(hdev);
1085
1086 if (hci_dev_test_flag(hdev, HCI_MGMT))
1087 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1088 rp->status);
1089
1090 hci_dev_unlock(hdev);
1091}
1092
1093static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1094 struct sk_buff *skb)
1095{
1096 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1097
1098 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1099
1100 hci_dev_lock(hdev);
1101
1102 if (hci_dev_test_flag(hdev, HCI_MGMT))
1103 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1104 ACL_LINK, 0, rp->status);
1105
1106 hci_dev_unlock(hdev);
1107}
1108
1109static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1110{
1111 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1112
1113 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1114
1115 hci_dev_lock(hdev);
1116
1117 if (hci_dev_test_flag(hdev, HCI_MGMT))
1118 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1119 0, rp->status);
1120
1121 hci_dev_unlock(hdev);
1122}
1123
1124static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1125 struct sk_buff *skb)
1126{
1127 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1128
1129 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1130
1131 hci_dev_lock(hdev);
1132
1133 if (hci_dev_test_flag(hdev, HCI_MGMT))
1134 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1135 ACL_LINK, 0, rp->status);
1136
1137 hci_dev_unlock(hdev);
1138}
1139
1140static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1141 struct sk_buff *skb)
1142{
1143 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1144
1145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1146}
1147
1148static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1149 struct sk_buff *skb)
1150{
1151 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1152
1153 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1154}
1155
1156static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1157{
1158 __u8 status = *((__u8 *) skb->data);
1159 bdaddr_t *sent;
1160
1161 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1162
1163 if (status)
1164 return;
1165
1166 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1167 if (!sent)
1168 return;
1169
1170 hci_dev_lock(hdev);
1171
1172 bacpy(&hdev->random_addr, sent);
1173
1174 hci_dev_unlock(hdev);
1175}
1176
1177static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1178{
1179 __u8 status = *((__u8 *) skb->data);
1180 struct hci_cp_le_set_default_phy *cp;
1181
1182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183
1184 if (status)
1185 return;
1186
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1188 if (!cp)
1189 return;
1190
1191 hci_dev_lock(hdev);
1192
1193 hdev->le_tx_def_phys = cp->tx_phys;
1194 hdev->le_rx_def_phys = cp->rx_phys;
1195
1196 hci_dev_unlock(hdev);
1197}
1198
1199static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1200 struct sk_buff *skb)
1201{
1202 __u8 status = *((__u8 *) skb->data);
1203 struct hci_cp_le_set_adv_set_rand_addr *cp;
1204 struct adv_info *adv_instance;
1205
1206 if (status)
1207 return;
1208
1209 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1210 if (!cp)
1211 return;
1212
1213 hci_dev_lock(hdev);
1214
1215 if (!cp->handle) {
1216
1217 bacpy(&hdev->random_addr, &cp->bdaddr);
1218 } else {
1219 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1220 if (adv_instance)
1221 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1222 }
1223
1224 hci_dev_unlock(hdev);
1225}
1226
1227static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1228 struct sk_buff *skb)
1229{
1230 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1231
1232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1233
1234 if (rp->status)
1235 return;
1236
1237 hdev->min_le_tx_power = rp->min_le_tx_power;
1238 hdev->max_le_tx_power = rp->max_le_tx_power;
1239}
1240
1241static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1242{
1243 __u8 *sent, status = *((__u8 *) skb->data);
1244
1245 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1246
1247 if (status)
1248 return;
1249
1250 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1251 if (!sent)
1252 return;
1253
1254 hci_dev_lock(hdev);
1255
1256
1257
1258
1259 if (*sent) {
1260 struct hci_conn *conn;
1261
1262 hci_dev_set_flag(hdev, HCI_LE_ADV);
1263
1264 conn = hci_lookup_le_connect(hdev);
1265 if (conn)
1266 queue_delayed_work(hdev->workqueue,
1267 &conn->le_conn_timeout,
1268 conn->conn_timeout);
1269 } else {
1270 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1271 }
1272
1273 hci_dev_unlock(hdev);
1274}
1275
1276static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1277 struct sk_buff *skb)
1278{
1279 struct hci_cp_le_set_ext_adv_enable *cp;
1280 __u8 status = *((__u8 *) skb->data);
1281
1282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1283
1284 if (status)
1285 return;
1286
1287 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1288 if (!cp)
1289 return;
1290
1291 hci_dev_lock(hdev);
1292
1293 if (cp->enable) {
1294 struct hci_conn *conn;
1295
1296 hci_dev_set_flag(hdev, HCI_LE_ADV);
1297
1298 conn = hci_lookup_le_connect(hdev);
1299 if (conn)
1300 queue_delayed_work(hdev->workqueue,
1301 &conn->le_conn_timeout,
1302 conn->conn_timeout);
1303 } else {
1304 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1305 }
1306
1307 hci_dev_unlock(hdev);
1308}
1309
1310static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1311{
1312 struct hci_cp_le_set_scan_param *cp;
1313 __u8 status = *((__u8 *) skb->data);
1314
1315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1316
1317 if (status)
1318 return;
1319
1320 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1321 if (!cp)
1322 return;
1323
1324 hci_dev_lock(hdev);
1325
1326 hdev->le_scan_type = cp->type;
1327
1328 hci_dev_unlock(hdev);
1329}
1330
1331static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1332 struct sk_buff *skb)
1333{
1334 struct hci_cp_le_set_ext_scan_params *cp;
1335 __u8 status = *((__u8 *) skb->data);
1336 struct hci_cp_le_scan_phy_params *phy_param;
1337
1338 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1339
1340 if (status)
1341 return;
1342
1343 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1344 if (!cp)
1345 return;
1346
1347 phy_param = (void *)cp->data;
1348
1349 hci_dev_lock(hdev);
1350
1351 hdev->le_scan_type = phy_param->type;
1352
1353 hci_dev_unlock(hdev);
1354}
1355
1356static bool has_pending_adv_report(struct hci_dev *hdev)
1357{
1358 struct discovery_state *d = &hdev->discovery;
1359
1360 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1361}
1362
1363static void clear_pending_adv_report(struct hci_dev *hdev)
1364{
1365 struct discovery_state *d = &hdev->discovery;
1366
1367 bacpy(&d->last_adv_addr, BDADDR_ANY);
1368 d->last_adv_data_len = 0;
1369}
1370
1371static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1372 u8 bdaddr_type, s8 rssi, u32 flags,
1373 u8 *data, u8 len)
1374{
1375 struct discovery_state *d = &hdev->discovery;
1376
1377 if (len > HCI_MAX_AD_LENGTH)
1378 return;
1379
1380 bacpy(&d->last_adv_addr, bdaddr);
1381 d->last_adv_addr_type = bdaddr_type;
1382 d->last_adv_rssi = rssi;
1383 d->last_adv_flags = flags;
1384 memcpy(d->last_adv_data, data, len);
1385 d->last_adv_data_len = len;
1386}
1387
1388static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1389{
1390 hci_dev_lock(hdev);
1391
1392 switch (enable) {
1393 case LE_SCAN_ENABLE:
1394 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1395 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1396 clear_pending_adv_report(hdev);
1397 break;
1398
1399 case LE_SCAN_DISABLE:
1400
1401
1402
1403
1404 if (has_pending_adv_report(hdev)) {
1405 struct discovery_state *d = &hdev->discovery;
1406
1407 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1408 d->last_adv_addr_type, NULL,
1409 d->last_adv_rssi, d->last_adv_flags,
1410 d->last_adv_data,
1411 d->last_adv_data_len, NULL, 0);
1412 }
1413
1414
1415
1416
1417 cancel_delayed_work(&hdev->le_scan_disable);
1418
1419 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1420
1421
1422
1423
1424
1425
1426
1427
1428 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1429 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1430 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1431 hdev->discovery.state == DISCOVERY_FINDING)
1432 hci_req_reenable_advertising(hdev);
1433
1434 break;
1435
1436 default:
1437 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1438 enable);
1439 break;
1440 }
1441
1442 hci_dev_unlock(hdev);
1443}
1444
1445static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1446 struct sk_buff *skb)
1447{
1448 struct hci_cp_le_set_scan_enable *cp;
1449 __u8 status = *((__u8 *) skb->data);
1450
1451 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1452
1453 if (status)
1454 return;
1455
1456 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1457 if (!cp)
1458 return;
1459
1460 le_set_scan_enable_complete(hdev, cp->enable);
1461}
1462
1463static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1464 struct sk_buff *skb)
1465{
1466 struct hci_cp_le_set_ext_scan_enable *cp;
1467 __u8 status = *((__u8 *) skb->data);
1468
1469 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1470
1471 if (status)
1472 return;
1473
1474 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1475 if (!cp)
1476 return;
1477
1478 le_set_scan_enable_complete(hdev, cp->enable);
1479}
1480
1481static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1482 struct sk_buff *skb)
1483{
1484 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1485
1486 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1487 rp->num_of_sets);
1488
1489 if (rp->status)
1490 return;
1491
1492 hdev->le_num_of_adv_sets = rp->num_of_sets;
1493}
1494
1495static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1496 struct sk_buff *skb)
1497{
1498 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1499
1500 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1501
1502 if (rp->status)
1503 return;
1504
1505 hdev->le_accept_list_size = rp->size;
1506}
1507
1508static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1509 struct sk_buff *skb)
1510{
1511 __u8 status = *((__u8 *) skb->data);
1512
1513 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1514
1515 if (status)
1516 return;
1517
1518 hci_bdaddr_list_clear(&hdev->le_accept_list);
1519}
1520
1521static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1522 struct sk_buff *skb)
1523{
1524 struct hci_cp_le_add_to_accept_list *sent;
1525 __u8 status = *((__u8 *) skb->data);
1526
1527 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1528
1529 if (status)
1530 return;
1531
1532 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1533 if (!sent)
1534 return;
1535
1536 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1537 sent->bdaddr_type);
1538}
1539
1540static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1541 struct sk_buff *skb)
1542{
1543 struct hci_cp_le_del_from_accept_list *sent;
1544 __u8 status = *((__u8 *) skb->data);
1545
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1547
1548 if (status)
1549 return;
1550
1551 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1552 if (!sent)
1553 return;
1554
1555 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1556 sent->bdaddr_type);
1557}
1558
1559static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1560 struct sk_buff *skb)
1561{
1562 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1563
1564 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1565
1566 if (rp->status)
1567 return;
1568
1569 memcpy(hdev->le_states, rp->le_states, 8);
1570}
1571
1572static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1573 struct sk_buff *skb)
1574{
1575 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1576
1577 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1578
1579 if (rp->status)
1580 return;
1581
1582 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1583 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1584}
1585
1586static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1587 struct sk_buff *skb)
1588{
1589 struct hci_cp_le_write_def_data_len *sent;
1590 __u8 status = *((__u8 *) skb->data);
1591
1592 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1593
1594 if (status)
1595 return;
1596
1597 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1598 if (!sent)
1599 return;
1600
1601 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1602 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1603}
1604
1605static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1606 struct sk_buff *skb)
1607{
1608 struct hci_cp_le_add_to_resolv_list *sent;
1609 __u8 status = *((__u8 *) skb->data);
1610
1611 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1612
1613 if (status)
1614 return;
1615
1616 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1617 if (!sent)
1618 return;
1619
1620 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1621 sent->bdaddr_type, sent->peer_irk,
1622 sent->local_irk);
1623}
1624
1625static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1626 struct sk_buff *skb)
1627{
1628 struct hci_cp_le_del_from_resolv_list *sent;
1629 __u8 status = *((__u8 *) skb->data);
1630
1631 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1632
1633 if (status)
1634 return;
1635
1636 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1637 if (!sent)
1638 return;
1639
1640 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1641 sent->bdaddr_type);
1642}
1643
1644static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1645 struct sk_buff *skb)
1646{
1647 __u8 status = *((__u8 *) skb->data);
1648
1649 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1650
1651 if (status)
1652 return;
1653
1654 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1655}
1656
1657static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1658 struct sk_buff *skb)
1659{
1660 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1661
1662 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1663
1664 if (rp->status)
1665 return;
1666
1667 hdev->le_resolv_list_size = rp->size;
1668}
1669
1670static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1671 struct sk_buff *skb)
1672{
1673 __u8 *sent, status = *((__u8 *) skb->data);
1674
1675 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1676
1677 if (status)
1678 return;
1679
1680 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1681 if (!sent)
1682 return;
1683
1684 hci_dev_lock(hdev);
1685
1686 if (*sent)
1687 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1688 else
1689 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1690
1691 hci_dev_unlock(hdev);
1692}
1693
1694static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1695 struct sk_buff *skb)
1696{
1697 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1698
1699 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1700
1701 if (rp->status)
1702 return;
1703
1704 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1705 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1706 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1707 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1708}
1709
1710static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1711 struct sk_buff *skb)
1712{
1713 struct hci_cp_write_le_host_supported *sent;
1714 __u8 status = *((__u8 *) skb->data);
1715
1716 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1717
1718 if (status)
1719 return;
1720
1721 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1722 if (!sent)
1723 return;
1724
1725 hci_dev_lock(hdev);
1726
1727 if (sent->le) {
1728 hdev->features[1][0] |= LMP_HOST_LE;
1729 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1730 } else {
1731 hdev->features[1][0] &= ~LMP_HOST_LE;
1732 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1733 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1734 }
1735
1736 if (sent->simul)
1737 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1738 else
1739 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1740
1741 hci_dev_unlock(hdev);
1742}
1743
1744static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1745{
1746 struct hci_cp_le_set_adv_param *cp;
1747 u8 status = *((u8 *) skb->data);
1748
1749 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1750
1751 if (status)
1752 return;
1753
1754 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1755 if (!cp)
1756 return;
1757
1758 hci_dev_lock(hdev);
1759 hdev->adv_addr_type = cp->own_address_type;
1760 hci_dev_unlock(hdev);
1761}
1762
1763static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1764{
1765 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1766 struct hci_cp_le_set_ext_adv_params *cp;
1767 struct adv_info *adv_instance;
1768
1769 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1770
1771 if (rp->status)
1772 return;
1773
1774 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1775 if (!cp)
1776 return;
1777
1778 hci_dev_lock(hdev);
1779 hdev->adv_addr_type = cp->own_addr_type;
1780 if (!cp->handle) {
1781
1782 hdev->adv_tx_power = rp->tx_power;
1783 } else {
1784 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1785 if (adv_instance)
1786 adv_instance->tx_power = rp->tx_power;
1787 }
1788
1789 hci_req_update_adv_data(hdev, cp->handle);
1790
1791 hci_dev_unlock(hdev);
1792}
1793
1794static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1795{
1796 struct hci_rp_read_rssi *rp = (void *) skb->data;
1797 struct hci_conn *conn;
1798
1799 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1800
1801 if (rp->status)
1802 return;
1803
1804 hci_dev_lock(hdev);
1805
1806 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1807 if (conn)
1808 conn->rssi = rp->rssi;
1809
1810 hci_dev_unlock(hdev);
1811}
1812
1813static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1814{
1815 struct hci_cp_read_tx_power *sent;
1816 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1817 struct hci_conn *conn;
1818
1819 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1820
1821 if (rp->status)
1822 return;
1823
1824 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1825 if (!sent)
1826 return;
1827
1828 hci_dev_lock(hdev);
1829
1830 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1831 if (!conn)
1832 goto unlock;
1833
1834 switch (sent->type) {
1835 case 0x00:
1836 conn->tx_power = rp->tx_power;
1837 break;
1838 case 0x01:
1839 conn->max_tx_power = rp->tx_power;
1840 break;
1841 }
1842
1843unlock:
1844 hci_dev_unlock(hdev);
1845}
1846
1847static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1848{
1849 u8 status = *((u8 *) skb->data);
1850 u8 *mode;
1851
1852 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1853
1854 if (status)
1855 return;
1856
1857 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1858 if (mode)
1859 hdev->ssp_debug_mode = *mode;
1860}
1861
1862static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1863{
1864 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1865
1866 if (status) {
1867 hci_conn_check_pending(hdev);
1868 return;
1869 }
1870
1871 set_bit(HCI_INQUIRY, &hdev->flags);
1872}
1873
1874static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1875{
1876 struct hci_cp_create_conn *cp;
1877 struct hci_conn *conn;
1878
1879 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1880
1881 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1882 if (!cp)
1883 return;
1884
1885 hci_dev_lock(hdev);
1886
1887 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1888
1889 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1890
1891 if (status) {
1892 if (conn && conn->state == BT_CONNECT) {
1893 if (status != 0x0c || conn->attempt > 2) {
1894 conn->state = BT_CLOSED;
1895 hci_connect_cfm(conn, status);
1896 hci_conn_del(conn);
1897 } else
1898 conn->state = BT_CONNECT2;
1899 }
1900 } else {
1901 if (!conn) {
1902 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1903 HCI_ROLE_MASTER);
1904 if (!conn)
1905 bt_dev_err(hdev, "no memory for new connection");
1906 }
1907 }
1908
1909 hci_dev_unlock(hdev);
1910}
1911
1912static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1913{
1914 struct hci_cp_add_sco *cp;
1915 struct hci_conn *acl, *sco;
1916 __u16 handle;
1917
1918 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1919
1920 if (!status)
1921 return;
1922
1923 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1924 if (!cp)
1925 return;
1926
1927 handle = __le16_to_cpu(cp->handle);
1928
1929 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1930
1931 hci_dev_lock(hdev);
1932
1933 acl = hci_conn_hash_lookup_handle(hdev, handle);
1934 if (acl) {
1935 sco = acl->link;
1936 if (sco) {
1937 sco->state = BT_CLOSED;
1938
1939 hci_connect_cfm(sco, status);
1940 hci_conn_del(sco);
1941 }
1942 }
1943
1944 hci_dev_unlock(hdev);
1945}
1946
1947static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1948{
1949 struct hci_cp_auth_requested *cp;
1950 struct hci_conn *conn;
1951
1952 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1953
1954 if (!status)
1955 return;
1956
1957 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1958 if (!cp)
1959 return;
1960
1961 hci_dev_lock(hdev);
1962
1963 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1964 if (conn) {
1965 if (conn->state == BT_CONFIG) {
1966 hci_connect_cfm(conn, status);
1967 hci_conn_drop(conn);
1968 }
1969 }
1970
1971 hci_dev_unlock(hdev);
1972}
1973
1974static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1975{
1976 struct hci_cp_set_conn_encrypt *cp;
1977 struct hci_conn *conn;
1978
1979 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1980
1981 if (!status)
1982 return;
1983
1984 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1985 if (!cp)
1986 return;
1987
1988 hci_dev_lock(hdev);
1989
1990 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1991 if (conn) {
1992 if (conn->state == BT_CONFIG) {
1993 hci_connect_cfm(conn, status);
1994 hci_conn_drop(conn);
1995 }
1996 }
1997
1998 hci_dev_unlock(hdev);
1999}
2000
2001static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2002 struct hci_conn *conn)
2003{
2004 if (conn->state != BT_CONFIG || !conn->out)
2005 return 0;
2006
2007 if (conn->pending_sec_level == BT_SECURITY_SDP)
2008 return 0;
2009
2010
2011
2012
2013
2014 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2015 conn->pending_sec_level != BT_SECURITY_FIPS &&
2016 conn->pending_sec_level != BT_SECURITY_HIGH &&
2017 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2018 return 0;
2019
2020 return 1;
2021}
2022
2023static int hci_resolve_name(struct hci_dev *hdev,
2024 struct inquiry_entry *e)
2025{
2026 struct hci_cp_remote_name_req cp;
2027
2028 memset(&cp, 0, sizeof(cp));
2029
2030 bacpy(&cp.bdaddr, &e->data.bdaddr);
2031 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2032 cp.pscan_mode = e->data.pscan_mode;
2033 cp.clock_offset = e->data.clock_offset;
2034
2035 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2036}
2037
2038static bool hci_resolve_next_name(struct hci_dev *hdev)
2039{
2040 struct discovery_state *discov = &hdev->discovery;
2041 struct inquiry_entry *e;
2042
2043 if (list_empty(&discov->resolve))
2044 return false;
2045
2046 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2047 if (!e)
2048 return false;
2049
2050 if (hci_resolve_name(hdev, e) == 0) {
2051 e->name_state = NAME_PENDING;
2052 return true;
2053 }
2054
2055 return false;
2056}
2057
2058static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2059 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2060{
2061 struct discovery_state *discov = &hdev->discovery;
2062 struct inquiry_entry *e;
2063
2064
2065
2066
2067
2068
2069 if (conn &&
2070 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2071 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2072 mgmt_device_connected(hdev, conn, name, name_len);
2073
2074 if (discov->state == DISCOVERY_STOPPED)
2075 return;
2076
2077 if (discov->state == DISCOVERY_STOPPING)
2078 goto discov_complete;
2079
2080 if (discov->state != DISCOVERY_RESOLVING)
2081 return;
2082
2083 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2084
2085
2086
2087
2088 if (!e)
2089 return;
2090
2091 list_del(&e->list);
2092 if (name) {
2093 e->name_state = NAME_KNOWN;
2094 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2095 e->data.rssi, name, name_len);
2096 } else {
2097 e->name_state = NAME_NOT_KNOWN;
2098 }
2099
2100 if (hci_resolve_next_name(hdev))
2101 return;
2102
2103discov_complete:
2104 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2105}
2106
2107static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2108{
2109 struct hci_cp_remote_name_req *cp;
2110 struct hci_conn *conn;
2111
2112 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2113
2114
2115
2116 if (!status)
2117 return;
2118
2119 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2120 if (!cp)
2121 return;
2122
2123 hci_dev_lock(hdev);
2124
2125 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2126
2127 if (hci_dev_test_flag(hdev, HCI_MGMT))
2128 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2129
2130 if (!conn)
2131 goto unlock;
2132
2133 if (!hci_outgoing_auth_needed(hdev, conn))
2134 goto unlock;
2135
2136 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2137 struct hci_cp_auth_requested auth_cp;
2138
2139 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2140
2141 auth_cp.handle = __cpu_to_le16(conn->handle);
2142 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2143 sizeof(auth_cp), &auth_cp);
2144 }
2145
2146unlock:
2147 hci_dev_unlock(hdev);
2148}
2149
2150static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2151{
2152 struct hci_cp_read_remote_features *cp;
2153 struct hci_conn *conn;
2154
2155 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2156
2157 if (!status)
2158 return;
2159
2160 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2161 if (!cp)
2162 return;
2163
2164 hci_dev_lock(hdev);
2165
2166 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2167 if (conn) {
2168 if (conn->state == BT_CONFIG) {
2169 hci_connect_cfm(conn, status);
2170 hci_conn_drop(conn);
2171 }
2172 }
2173
2174 hci_dev_unlock(hdev);
2175}
2176
2177static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2178{
2179 struct hci_cp_read_remote_ext_features *cp;
2180 struct hci_conn *conn;
2181
2182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2183
2184 if (!status)
2185 return;
2186
2187 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2188 if (!cp)
2189 return;
2190
2191 hci_dev_lock(hdev);
2192
2193 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2194 if (conn) {
2195 if (conn->state == BT_CONFIG) {
2196 hci_connect_cfm(conn, status);
2197 hci_conn_drop(conn);
2198 }
2199 }
2200
2201 hci_dev_unlock(hdev);
2202}
2203
2204static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2205{
2206 struct hci_cp_setup_sync_conn *cp;
2207 struct hci_conn *acl, *sco;
2208 __u16 handle;
2209
2210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2211
2212 if (!status)
2213 return;
2214
2215 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2216 if (!cp)
2217 return;
2218
2219 handle = __le16_to_cpu(cp->handle);
2220
2221 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2222
2223 hci_dev_lock(hdev);
2224
2225 acl = hci_conn_hash_lookup_handle(hdev, handle);
2226 if (acl) {
2227 sco = acl->link;
2228 if (sco) {
2229 sco->state = BT_CLOSED;
2230
2231 hci_connect_cfm(sco, status);
2232 hci_conn_del(sco);
2233 }
2234 }
2235
2236 hci_dev_unlock(hdev);
2237}
2238
2239static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2240{
2241 struct hci_cp_sniff_mode *cp;
2242 struct hci_conn *conn;
2243
2244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2245
2246 if (!status)
2247 return;
2248
2249 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2250 if (!cp)
2251 return;
2252
2253 hci_dev_lock(hdev);
2254
2255 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2256 if (conn) {
2257 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2258
2259 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2260 hci_sco_setup(conn, status);
2261 }
2262
2263 hci_dev_unlock(hdev);
2264}
2265
2266static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2267{
2268 struct hci_cp_exit_sniff_mode *cp;
2269 struct hci_conn *conn;
2270
2271 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2272
2273 if (!status)
2274 return;
2275
2276 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2277 if (!cp)
2278 return;
2279
2280 hci_dev_lock(hdev);
2281
2282 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2283 if (conn) {
2284 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2285
2286 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2287 hci_sco_setup(conn, status);
2288 }
2289
2290 hci_dev_unlock(hdev);
2291}
2292
2293static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2294{
2295 struct hci_cp_disconnect *cp;
2296 struct hci_conn *conn;
2297
2298 if (!status)
2299 return;
2300
2301 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2302 if (!cp)
2303 return;
2304
2305 hci_dev_lock(hdev);
2306
2307 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2308 if (conn) {
2309 u8 type = conn->type;
2310
2311 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2312 conn->dst_type, status);
2313
2314
2315
2316
2317
2318
2319 hci_conn_del(conn);
2320 if (type == LE_LINK)
2321 hci_req_reenable_advertising(hdev);
2322 }
2323
2324 hci_dev_unlock(hdev);
2325}
2326
2327static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2328 u8 peer_addr_type, u8 own_address_type,
2329 u8 filter_policy)
2330{
2331 struct hci_conn *conn;
2332
2333 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2334 peer_addr_type);
2335 if (!conn)
2336 return;
2337
2338	/* When the controller performs address resolution (LL privacy),
2339	 * the command used the "resolved" own address types 0x02/0x03.
2340	 * Map those back to plain public/random for local bookkeeping.
2341	 */
2342 if (use_ll_privacy(hdev) &&
2343 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2344 switch (own_address_type) {
2345 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2346 own_address_type = ADDR_LE_DEV_PUBLIC;
2347 break;
2348 case ADDR_LE_DEV_RANDOM_RESOLVED:
2349 own_address_type = ADDR_LE_DEV_RANDOM;
2350 break;
2351 }
2352 }
2353
2354	/* Record the initiator (local) and responder (remote) address
2355	 * information for this pending connection so that it is
2356	 * available once the connection completes.
2357	 */
2358 conn->init_addr_type = own_address_type;
2359 if (own_address_type == ADDR_LE_DEV_RANDOM)
2360 bacpy(&conn->init_addr, &hdev->random_addr);
2361 else
2362 bacpy(&conn->init_addr, &hdev->bdaddr);
2363
2364 conn->resp_addr_type = peer_addr_type;
2365 bacpy(&conn->resp_addr, peer_addr);
2366
2367	/* LE has no page-timeout equivalent, so a connection attempt
2368	 * directed at the peer address could otherwise stay pending
2369	 * forever.  Arm the connection timeout for that case; attempts
2370	 * driven by the accept list are left running.
2371	 */
2372 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2373 queue_delayed_work(conn->hdev->workqueue,
2374 &conn->le_conn_timeout,
2375 conn->conn_timeout);
2376}
2377
2378static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2379{
2380 struct hci_cp_le_create_conn *cp;
2381
2382 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2383
2384	/* Only the successful command status needs handling here; a
2385	 * failed LE Create Connection is dealt with through the normal
2386	 * connection failure path.
2387	 */
2388 if (status)
2389 return;
2390
2391 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2392 if (!cp)
2393 return;
2394
2395 hci_dev_lock(hdev);
2396
2397 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2398 cp->own_address_type, cp->filter_policy);
2399
2400 hci_dev_unlock(hdev);
2401}
2402
2403static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2404{
2405 struct hci_cp_le_ext_create_conn *cp;
2406
2407 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2408
2409	/* As in hci_cs_le_create_conn(), only the success case needs
2410	 * handling here.
2411	 */
2412
2413 if (status)
2414 return;
2415
2416 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2417 if (!cp)
2418 return;
2419
2420 hci_dev_lock(hdev);
2421
2422 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2423 cp->own_addr_type, cp->filter_policy);
2424
2425 hci_dev_unlock(hdev);
2426}
2427
2428static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2429{
2430 struct hci_cp_le_read_remote_features *cp;
2431 struct hci_conn *conn;
2432
2433 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2434
2435 if (!status)
2436 return;
2437
2438 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2439 if (!cp)
2440 return;
2441
2442 hci_dev_lock(hdev);
2443
2444 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2445 if (conn) {
2446 if (conn->state == BT_CONFIG) {
2447 hci_connect_cfm(conn, status);
2448 hci_conn_drop(conn);
2449 }
2450 }
2451
2452 hci_dev_unlock(hdev);
2453}
2454
2455static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2456{
2457 struct hci_cp_le_start_enc *cp;
2458 struct hci_conn *conn;
2459
2460 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2461
2462 if (!status)
2463 return;
2464
2465 hci_dev_lock(hdev);
2466
2467 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2468 if (!cp)
2469 goto unlock;
2470
2471 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2472 if (!conn)
2473 goto unlock;
2474
2475 if (conn->state != BT_CONNECTED)
2476 goto unlock;
2477
2478 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2479 hci_conn_drop(conn);
2480
2481unlock:
2482 hci_dev_unlock(hdev);
2483}
2484
2485static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2486{
2487 struct hci_cp_switch_role *cp;
2488 struct hci_conn *conn;
2489
2490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2491
2492 if (!status)
2493 return;
2494
2495 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2496 if (!cp)
2497 return;
2498
2499 hci_dev_lock(hdev);
2500
2501 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2502 if (conn)
2503 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2504
2505 hci_dev_unlock(hdev);
2506}
2507
2508static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2509{
2510 __u8 status = *((__u8 *) skb->data);
2511 struct discovery_state *discov = &hdev->discovery;
2512 struct inquiry_entry *e;
2513
2514 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2515
2516 hci_conn_check_pending(hdev);
2517
2518 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2519 return;
2520
2521 smp_mb__after_atomic();
2522 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2523
2524 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2525 return;
2526
2527 hci_dev_lock(hdev);
2528
2529 if (discov->state != DISCOVERY_FINDING)
2530 goto unlock;
2531
2532 if (list_empty(&discov->resolve)) {
2533		/* The inquiry part of discovery has finished and no names
2534		 * need resolving.  Only move to DISCOVERY_STOPPED when no
2535		 * LE scan is still running, or when the controller cannot
2536		 * run inquiry and LE scan simultaneously; otherwise the LE
2537		 * scan completion takes care of the state change.
2538		 */
2539
2540 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2541 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2542 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2543 goto unlock;
2544 }
2545
2546 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2547 if (e && hci_resolve_name(hdev, e) == 0) {
2548 e->name_state = NAME_PENDING;
2549 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2550 } else {
2551		/* Name resolution was not started, so discovery is done.
2552		 * As above, only change the state when no simultaneous LE
2553		 * scan is still in progress.
2554		 */
2555
2556
2557
2558 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2559 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2560 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2561 }
2562
2563unlock:
2564 hci_dev_unlock(hdev);
2565}
2566
2567static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2568{
2569 struct inquiry_data data;
2570 struct inquiry_info *info = (void *) (skb->data + 1);
2571 int num_rsp = *((__u8 *) skb->data);
2572
2573 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2574
2575 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2576 return;
2577
2578 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2579 return;
2580
2581 hci_dev_lock(hdev);
2582
2583 for (; num_rsp; num_rsp--, info++) {
2584 u32 flags;
2585
2586 bacpy(&data.bdaddr, &info->bdaddr);
2587 data.pscan_rep_mode = info->pscan_rep_mode;
2588 data.pscan_period_mode = info->pscan_period_mode;
2589 data.pscan_mode = info->pscan_mode;
2590 memcpy(data.dev_class, info->dev_class, 3);
2591 data.clock_offset = info->clock_offset;
2592 data.rssi = HCI_RSSI_INVALID;
2593 data.ssp_mode = 0x00;
2594
2595 flags = hci_inquiry_cache_update(hdev, &data, false);
2596
2597 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2598 info->dev_class, HCI_RSSI_INVALID,
2599 flags, NULL, 0, NULL, 0);
2600 }
2601
2602 hci_dev_unlock(hdev);
2603}
2604
2605static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2606{
2607 struct hci_ev_conn_complete *ev = (void *) skb->data;
2608 struct hci_conn *conn;
2609
2610 BT_DBG("%s", hdev->name);
2611
2612 hci_dev_lock(hdev);
2613
2614 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2615 if (!conn) {
2616		/* No connection object exists for this event.  If it is an
2617		 * ACL link to a device on the accept list (for example one
2618		 * auto-accepted through the controller's event filter),
2619		 * create the object now.
2620		 *
2621		 * Otherwise it may be a SCO connection that was requested
2622		 * as eSCO; look up the eSCO object and adjust its type.
2623		 */
2624
2625 if (ev->link_type == ACL_LINK &&
2626 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2627 &ev->bdaddr,
2628 BDADDR_BREDR)) {
2629 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2630 HCI_ROLE_SLAVE);
2631 if (!conn) {
2632 bt_dev_err(hdev, "no memory for new conn");
2633 goto unlock;
2634 }
2635 } else {
2636 if (ev->link_type != SCO_LINK)
2637 goto unlock;
2638
2639 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2640 &ev->bdaddr);
2641 if (!conn)
2642 goto unlock;
2643
2644 conn->type = SCO_LINK;
2645 }
2646 }
2647
2648 if (!ev->status) {
2649 conn->handle = __le16_to_cpu(ev->handle);
2650
2651 if (conn->type == ACL_LINK) {
2652 conn->state = BT_CONFIG;
2653 hci_conn_hold(conn);
2654
2655 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2656 !hci_find_link_key(hdev, &ev->bdaddr))
2657 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2658 else
2659 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2660 } else
2661 conn->state = BT_CONNECTED;
2662
2663 hci_debugfs_create_conn(conn);
2664 hci_conn_add_sysfs(conn);
2665
2666 if (test_bit(HCI_AUTH, &hdev->flags))
2667 set_bit(HCI_CONN_AUTH, &conn->flags);
2668
2669 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2670 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2671
2672
2673 if (conn->type == ACL_LINK) {
2674 struct hci_cp_read_remote_features cp;
2675 cp.handle = ev->handle;
2676 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2677 sizeof(cp), &cp);
2678
2679 hci_req_update_scan(hdev);
2680 }
2681
2682
2683 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2684 struct hci_cp_change_conn_ptype cp;
2685 cp.handle = ev->handle;
2686 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2687 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2688 &cp);
2689 }
2690 } else {
2691 conn->state = BT_CLOSED;
2692 if (conn->type == ACL_LINK)
2693 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2694 conn->dst_type, ev->status);
2695 }
2696
2697 if (conn->type == ACL_LINK)
2698 hci_sco_setup(conn, ev->status);
2699
2700 if (ev->status) {
2701 hci_connect_cfm(conn, ev->status);
2702 hci_conn_del(conn);
2703 } else if (ev->link_type == SCO_LINK) {
2704 switch (conn->setting & SCO_AIRMODE_MASK) {
2705 case SCO_AIRMODE_CVSD:
2706 if (hdev->notify)
2707 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2708 break;
2709 }
2710
2711 hci_connect_cfm(conn, ev->status);
2712 }
2713
2714unlock:
2715 hci_dev_unlock(hdev);
2716
2717 hci_conn_check_pending(hdev);
2718}
2719
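/* Reject an incoming connection request with the "unacceptable BD_ADDR" reason */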
2720static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2721{
2722 struct hci_cp_reject_conn_req cp;
2723
2724 bacpy(&cp.bdaddr, bdaddr);
2725 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2726 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2727}
2728
2729static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2730{
2731 struct hci_ev_conn_request *ev = (void *) skb->data;
2732 int mask = hdev->link_mode;
2733 struct inquiry_entry *ie;
2734 struct hci_conn *conn;
2735 __u8 flags = 0;
2736
2737 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2738 ev->link_type);
2739
2740 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2741 &flags);
2742
2743 if (!(mask & HCI_LM_ACCEPT)) {
2744 hci_reject_conn(hdev, &ev->bdaddr);
2745 return;
2746 }
2747
2748 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2749 BDADDR_BREDR)) {
2750 hci_reject_conn(hdev, &ev->bdaddr);
2751 return;
2752 }
2753
2754	/* When the device is managed but not currently connectable,
2755	 * only accept connections from devices that have an entry on
2756	 * the accept list.
2757	 */
2758 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2759 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2760 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2761 BDADDR_BREDR)) {
2762 hci_reject_conn(hdev, &ev->bdaddr);
2763 return;
2764 }
2765
2766
2767
2768 hci_dev_lock(hdev);
2769
2770 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2771 if (ie)
2772 memcpy(ie->data.dev_class, ev->dev_class, 3);
2773
2774 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2775 &ev->bdaddr);
2776 if (!conn) {
2777 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2778 HCI_ROLE_SLAVE);
2779 if (!conn) {
2780 bt_dev_err(hdev, "no memory for new connection");
2781 hci_dev_unlock(hdev);
2782 return;
2783 }
2784 }
2785
2786 memcpy(conn->dev_class, ev->dev_class, 3);
2787
2788 hci_dev_unlock(hdev);
2789
2790 if (ev->link_type == ACL_LINK ||
2791 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2792 struct hci_cp_accept_conn_req cp;
2793 conn->state = BT_CONNECT;
2794
2795 bacpy(&cp.bdaddr, &ev->bdaddr);
2796
2797 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2798 cp.role = 0x00;
2799 else
2800 cp.role = 0x01;
2801
2802 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2803 } else if (!(flags & HCI_PROTO_DEFER)) {
2804 struct hci_cp_accept_sync_conn_req cp;
2805 conn->state = BT_CONNECT;
2806
2807 bacpy(&cp.bdaddr, &ev->bdaddr);
2808 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2809
2810 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2811 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2812 cp.max_latency = cpu_to_le16(0xffff);
2813 cp.content_format = cpu_to_le16(hdev->voice_setting);
2814 cp.retrans_effort = 0xff;
2815
2816 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2817 &cp);
2818 } else {
2819 conn->state = BT_CONNECT2;
2820 hci_connect_cfm(conn, 0);
2821 }
2822}
2823
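/* Map an HCI disconnect reason code to the corresponding mgmt disconnect reason */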
2824static u8 hci_to_mgmt_reason(u8 err)
2825{
2826 switch (err) {
2827 case HCI_ERROR_CONNECTION_TIMEOUT:
2828 return MGMT_DEV_DISCONN_TIMEOUT;
2829 case HCI_ERROR_REMOTE_USER_TERM:
2830 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2831 case HCI_ERROR_REMOTE_POWER_OFF:
2832 return MGMT_DEV_DISCONN_REMOTE;
2833 case HCI_ERROR_LOCAL_HOST_TERM:
2834 return MGMT_DEV_DISCONN_LOCAL_HOST;
2835 default:
2836 return MGMT_DEV_DISCONN_UNKNOWN;
2837 }
2838}
2839
2840static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2841{
2842 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2843 u8 reason;
2844 struct hci_conn_params *params;
2845 struct hci_conn *conn;
2846 bool mgmt_connected;
2847 u8 type;
2848
2849 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2850
2851 hci_dev_lock(hdev);
2852
2853 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2854 if (!conn)
2855 goto unlock;
2856
2857 if (ev->status) {
2858 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2859 conn->dst_type, ev->status);
2860 goto unlock;
2861 }
2862
2863 conn->state = BT_CLOSED;
2864
2865 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2866
2867 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2868 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2869 else
2870 reason = hci_to_mgmt_reason(ev->reason);
2871
2872 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2873 reason, mgmt_connected);
2874
2875 if (conn->type == ACL_LINK) {
2876 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2877 hci_remove_link_key(hdev, &conn->dst);
2878
2879 hci_req_update_scan(hdev);
2880 }
2881
2882 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2883 if (params) {
2884 switch (params->auto_connect) {
2885 case HCI_AUTO_CONN_LINK_LOSS:
2886 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2887 break;
2888 fallthrough;
2889
2890 case HCI_AUTO_CONN_DIRECT:
2891 case HCI_AUTO_CONN_ALWAYS:
2892			list_del_init(&params->action);
2893			list_add(&params->action, &hdev->pend_le_conns);
2894 hci_update_background_scan(hdev);
2895 break;
2896
2897 default:
2898 break;
2899 }
2900 }
2901
2902 type = conn->type;
2903
2904 hci_disconn_cfm(conn, ev->reason);
2905 hci_conn_del(conn);
2906
2907	/* If this was the last connection, wake up a pending suspend
2908	 * that is waiting for all links to be disconnected.
2909	 */
2910 if (list_empty(&hdev->conn_hash.list) &&
2911 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2912 wake_up(&hdev->suspend_wait_q);
2913 }
2914
2915	/* Re-enable advertising if necessary, since advertising is
2916	 * automatically disabled by the controller when a connection
2917	 * is created and this LE link has now gone away.
2918	 */
2919
2920
2921
2922
2923
2924
2925 if (type == LE_LINK)
2926 hci_req_reenable_advertising(hdev);
2927
2928unlock:
2929 hci_dev_unlock(hdev);
2930}
2931
2932static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2933{
2934 struct hci_ev_auth_complete *ev = (void *) skb->data;
2935 struct hci_conn *conn;
2936
2937 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2938
2939 hci_dev_lock(hdev);
2940
2941 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2942 if (!conn)
2943 goto unlock;
2944
2945 if (!ev->status) {
2946 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2947
2948 if (!hci_conn_ssp_enabled(conn) &&
2949 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2950 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2951 } else {
2952 set_bit(HCI_CONN_AUTH, &conn->flags);
2953 conn->sec_level = conn->pending_sec_level;
2954 }
2955 } else {
2956 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2957 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2958
2959 mgmt_auth_failed(conn, ev->status);
2960 }
2961
2962 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2963 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2964
2965 if (conn->state == BT_CONFIG) {
2966 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2967 struct hci_cp_set_conn_encrypt cp;
2968 cp.handle = ev->handle;
2969 cp.encrypt = 0x01;
2970 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2971 &cp);
2972 } else {
2973 conn->state = BT_CONNECTED;
2974 hci_connect_cfm(conn, ev->status);
2975 hci_conn_drop(conn);
2976 }
2977 } else {
2978 hci_auth_cfm(conn, ev->status);
2979
2980 hci_conn_hold(conn);
2981 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2982 hci_conn_drop(conn);
2983 }
2984
2985 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2986 if (!ev->status) {
2987 struct hci_cp_set_conn_encrypt cp;
2988 cp.handle = ev->handle;
2989 cp.encrypt = 0x01;
2990 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2991 &cp);
2992 } else {
2993 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2994 hci_encrypt_cfm(conn, ev->status);
2995 }
2996 }
2997
2998unlock:
2999 hci_dev_unlock(hdev);
3000}
3001
3002static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3003{
3004 struct hci_ev_remote_name *ev = (void *) skb->data;
3005 struct hci_conn *conn;
3006
3007 BT_DBG("%s", hdev->name);
3008
3009 hci_conn_check_pending(hdev);
3010
3011 hci_dev_lock(hdev);
3012
3013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3014
3015 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3016 goto check_auth;
3017
3018 if (ev->status == 0)
3019 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3020 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3021 else
3022 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3023
3024check_auth:
3025 if (!conn)
3026 goto unlock;
3027
3028 if (!hci_outgoing_auth_needed(hdev, conn))
3029 goto unlock;
3030
3031 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3032 struct hci_cp_auth_requested cp;
3033
3034 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3035
3036 cp.handle = __cpu_to_le16(conn->handle);
3037 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3038 }
3039
3040unlock:
3041 hci_dev_unlock(hdev);
3042}
3043
3044static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3045 u16 opcode, struct sk_buff *skb)
3046{
3047 const struct hci_rp_read_enc_key_size *rp;
3048 struct hci_conn *conn;
3049 u16 handle;
3050
3051 BT_DBG("%s status 0x%02x", hdev->name, status);
3052
3053 if (!skb || skb->len < sizeof(*rp)) {
3054 bt_dev_err(hdev, "invalid read key size response");
3055 return;
3056 }
3057
3058 rp = (void *)skb->data;
3059 handle = le16_to_cpu(rp->handle);
3060
3061 hci_dev_lock(hdev);
3062
3063 conn = hci_conn_hash_lookup_handle(hdev, handle);
3064 if (!conn)
3065 goto unlock;
3066
3067	/* If the controller could not report the key size, record it
3068	 * as zero rather than assuming a value, so that higher layers
3069	 * do not treat the link as having a known-strength key.
3070	 */
3071 if (rp->status) {
3072 bt_dev_err(hdev, "failed to read key size for handle %u",
3073 handle);
3074 conn->enc_key_size = 0;
3075 } else {
3076 conn->enc_key_size = rp->key_size;
3077 }
3078
3079 hci_encrypt_cfm(conn, 0);
3080
3081unlock:
3082 hci_dev_unlock(hdev);
3083}
3084
3085static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3086{
3087 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3088 struct hci_conn *conn;
3089
3090 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3091
3092 hci_dev_lock(hdev);
3093
3094 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3095 if (!conn)
3096 goto unlock;
3097
3098 if (!ev->status) {
3099 if (ev->encrypt) {
3100			/* Encryption implies authentication */
3101 set_bit(HCI_CONN_AUTH, &conn->flags);
3102 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3103 conn->sec_level = conn->pending_sec_level;
3104
3105
3106 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3107 set_bit(HCI_CONN_FIPS, &conn->flags);
3108
3109 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3110 conn->type == LE_LINK)
3111 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3112 } else {
3113 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3114 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3115 }
3116 }
3117
3118	/* When encryption fails on an LE link, expire the current RPA
3119	 * so that a fresh one gets generated.
3120	 */
3121 if (ev->status && conn->type == LE_LINK) {
3122 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3123 hci_adv_instances_set_rpa_expired(hdev, true);
3124 }
3125
3126 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3127
3128	/* Check that the link security requirements are still met */
3129 if (!hci_conn_check_link_mode(conn))
3130 ev->status = HCI_ERROR_AUTH_FAILURE;
3131
3132 if (ev->status && conn->state == BT_CONNECTED) {
3133 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3134 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3135
3136		/* Notify upper layers so they can clean up before the
3137		 * link is disconnected.
3138		 */
3139 hci_encrypt_cfm(conn, ev->status);
3140 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3141 hci_conn_drop(conn);
3142 goto unlock;
3143 }
3144
3145	/* For a newly encrypted ACL link, try to read the encryption key size */
3146 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3147 struct hci_cp_read_enc_key_size cp;
3148 struct hci_request req;
3149
3150		/* Only issue Read Encryption Key Size if the controller
3151		 * advertises support for the command; otherwise assume
3152		 * the maximum key size.
3153		 */
3154 if (!(hdev->commands[20] & 0x10)) {
3155 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3156 goto notify;
3157 }
3158
3159 hci_req_init(&req, hdev);
3160
3161 cp.handle = cpu_to_le16(conn->handle);
3162 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3163
3164 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3165 bt_dev_err(hdev, "sending read key size failed");
3166 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3167 goto notify;
3168 }
3169
3170 goto unlock;
3171 }
3172
3173	/* Set the authenticated payload timeout for AES-CCM encrypted
3174	 * links on controllers that support it (BR/EDR links with ping
3175	 * support, or LE links with the LE Ping feature).
3176	 */
3177
3178
3179
3180 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3181 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3182 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3183 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3184 struct hci_cp_write_auth_payload_to cp;
3185
3186 cp.handle = cpu_to_le16(conn->handle);
3187 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3188 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3189 sizeof(cp), &cp);
3190 }
3191
3192notify:
3193 hci_encrypt_cfm(conn, ev->status);
3194
3195unlock:
3196 hci_dev_unlock(hdev);
3197}
3198
3199static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3200 struct sk_buff *skb)
3201{
3202 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3203 struct hci_conn *conn;
3204
3205 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3206
3207 hci_dev_lock(hdev);
3208
3209 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3210 if (conn) {
3211 if (!ev->status)
3212 set_bit(HCI_CONN_SECURE, &conn->flags);
3213
3214 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3215
3216 hci_key_change_cfm(conn, ev->status);
3217 }
3218
3219 hci_dev_unlock(hdev);
3220}
3221
3222static void hci_remote_features_evt(struct hci_dev *hdev,
3223 struct sk_buff *skb)
3224{
3225 struct hci_ev_remote_features *ev = (void *) skb->data;
3226 struct hci_conn *conn;
3227
3228 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3229
3230 hci_dev_lock(hdev);
3231
3232 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3233 if (!conn)
3234 goto unlock;
3235
3236 if (!ev->status)
3237 memcpy(conn->features[0], ev->features, 8);
3238
3239 if (conn->state != BT_CONFIG)
3240 goto unlock;
3241
3242 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3243 lmp_ext_feat_capable(conn)) {
3244 struct hci_cp_read_remote_ext_features cp;
3245 cp.handle = ev->handle;
3246 cp.page = 0x01;
3247 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3248 sizeof(cp), &cp);
3249 goto unlock;
3250 }
3251
3252 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3253 struct hci_cp_remote_name_req cp;
3254 memset(&cp, 0, sizeof(cp));
3255 bacpy(&cp.bdaddr, &conn->dst);
3256 cp.pscan_rep_mode = 0x02;
3257 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3258 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3259 mgmt_device_connected(hdev, conn, NULL, 0);
3260
3261 if (!hci_outgoing_auth_needed(hdev, conn)) {
3262 conn->state = BT_CONNECTED;
3263 hci_connect_cfm(conn, ev->status);
3264 hci_conn_drop(conn);
3265 }
3266
3267unlock:
3268 hci_dev_unlock(hdev);
3269}
3270
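/* Re-arm the command timers and, based on the Num_HCI_Command_Packets value
 * reported by the controller, either free up a command slot or start the
 * ncmd timer while no command slots are available.
 */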
3271static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev,
3272 u16 opcode, u8 ncmd)
3273{
3274 if (opcode != HCI_OP_NOP)
3275 cancel_delayed_work(&hdev->cmd_timer);
3276
3277 if (!test_bit(HCI_RESET, &hdev->flags)) {
3278 if (ncmd) {
3279 cancel_delayed_work(&hdev->ncmd_timer);
3280 atomic_set(&hdev->cmd_cnt, 1);
3281 } else {
3282 schedule_delayed_work(&hdev->ncmd_timer,
3283 HCI_NCMD_TIMEOUT);
3284 }
3285 }
3286}
3287
3288static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3289 u16 *opcode, u8 *status,
3290 hci_req_complete_t *req_complete,
3291 hci_req_complete_skb_t *req_complete_skb)
3292{
3293 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3294
3295 *opcode = __le16_to_cpu(ev->opcode);
3296 *status = skb->data[sizeof(*ev)];
3297
3298 skb_pull(skb, sizeof(*ev));
3299
3300 switch (*opcode) {
3301 case HCI_OP_INQUIRY_CANCEL:
3302 hci_cc_inquiry_cancel(hdev, skb, status);
3303 break;
3304
3305 case HCI_OP_PERIODIC_INQ:
3306 hci_cc_periodic_inq(hdev, skb);
3307 break;
3308
3309 case HCI_OP_EXIT_PERIODIC_INQ:
3310 hci_cc_exit_periodic_inq(hdev, skb);
3311 break;
3312
3313 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3314 hci_cc_remote_name_req_cancel(hdev, skb);
3315 break;
3316
3317 case HCI_OP_ROLE_DISCOVERY:
3318 hci_cc_role_discovery(hdev, skb);
3319 break;
3320
3321 case HCI_OP_READ_LINK_POLICY:
3322 hci_cc_read_link_policy(hdev, skb);
3323 break;
3324
3325 case HCI_OP_WRITE_LINK_POLICY:
3326 hci_cc_write_link_policy(hdev, skb);
3327 break;
3328
3329 case HCI_OP_READ_DEF_LINK_POLICY:
3330 hci_cc_read_def_link_policy(hdev, skb);
3331 break;
3332
3333 case HCI_OP_WRITE_DEF_LINK_POLICY:
3334 hci_cc_write_def_link_policy(hdev, skb);
3335 break;
3336
3337 case HCI_OP_RESET:
3338 hci_cc_reset(hdev, skb);
3339 break;
3340
3341 case HCI_OP_READ_STORED_LINK_KEY:
3342 hci_cc_read_stored_link_key(hdev, skb);
3343 break;
3344
3345 case HCI_OP_DELETE_STORED_LINK_KEY:
3346 hci_cc_delete_stored_link_key(hdev, skb);
3347 break;
3348
3349 case HCI_OP_WRITE_LOCAL_NAME:
3350 hci_cc_write_local_name(hdev, skb);
3351 break;
3352
3353 case HCI_OP_READ_LOCAL_NAME:
3354 hci_cc_read_local_name(hdev, skb);
3355 break;
3356
3357 case HCI_OP_WRITE_AUTH_ENABLE:
3358 hci_cc_write_auth_enable(hdev, skb);
3359 break;
3360
3361 case HCI_OP_WRITE_ENCRYPT_MODE:
3362 hci_cc_write_encrypt_mode(hdev, skb);
3363 break;
3364
3365 case HCI_OP_WRITE_SCAN_ENABLE:
3366 hci_cc_write_scan_enable(hdev, skb);
3367 break;
3368
3369 case HCI_OP_SET_EVENT_FLT:
3370 hci_cc_set_event_filter(hdev, skb);
3371 break;
3372
3373 case HCI_OP_READ_CLASS_OF_DEV:
3374 hci_cc_read_class_of_dev(hdev, skb);
3375 break;
3376
3377 case HCI_OP_WRITE_CLASS_OF_DEV:
3378 hci_cc_write_class_of_dev(hdev, skb);
3379 break;
3380
3381 case HCI_OP_READ_VOICE_SETTING:
3382 hci_cc_read_voice_setting(hdev, skb);
3383 break;
3384
3385 case HCI_OP_WRITE_VOICE_SETTING:
3386 hci_cc_write_voice_setting(hdev, skb);
3387 break;
3388
3389 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3390 hci_cc_read_num_supported_iac(hdev, skb);
3391 break;
3392
3393 case HCI_OP_WRITE_SSP_MODE:
3394 hci_cc_write_ssp_mode(hdev, skb);
3395 break;
3396
3397 case HCI_OP_WRITE_SC_SUPPORT:
3398 hci_cc_write_sc_support(hdev, skb);
3399 break;
3400
3401 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3402 hci_cc_read_auth_payload_timeout(hdev, skb);
3403 break;
3404
3405 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3406 hci_cc_write_auth_payload_timeout(hdev, skb);
3407 break;
3408
3409 case HCI_OP_READ_LOCAL_VERSION:
3410 hci_cc_read_local_version(hdev, skb);
3411 break;
3412
3413 case HCI_OP_READ_LOCAL_COMMANDS:
3414 hci_cc_read_local_commands(hdev, skb);
3415 break;
3416
3417 case HCI_OP_READ_LOCAL_FEATURES:
3418 hci_cc_read_local_features(hdev, skb);
3419 break;
3420
3421 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3422 hci_cc_read_local_ext_features(hdev, skb);
3423 break;
3424
3425 case HCI_OP_READ_BUFFER_SIZE:
3426 hci_cc_read_buffer_size(hdev, skb);
3427 break;
3428
3429 case HCI_OP_READ_BD_ADDR:
3430 hci_cc_read_bd_addr(hdev, skb);
3431 break;
3432
3433 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3434 hci_cc_read_local_pairing_opts(hdev, skb);
3435 break;
3436
3437 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3438 hci_cc_read_page_scan_activity(hdev, skb);
3439 break;
3440
3441 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3442 hci_cc_write_page_scan_activity(hdev, skb);
3443 break;
3444
3445 case HCI_OP_READ_PAGE_SCAN_TYPE:
3446 hci_cc_read_page_scan_type(hdev, skb);
3447 break;
3448
3449 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3450 hci_cc_write_page_scan_type(hdev, skb);
3451 break;
3452
3453 case HCI_OP_READ_DATA_BLOCK_SIZE:
3454 hci_cc_read_data_block_size(hdev, skb);
3455 break;
3456
3457 case HCI_OP_READ_FLOW_CONTROL_MODE:
3458 hci_cc_read_flow_control_mode(hdev, skb);
3459 break;
3460
3461 case HCI_OP_READ_LOCAL_AMP_INFO:
3462 hci_cc_read_local_amp_info(hdev, skb);
3463 break;
3464
3465 case HCI_OP_READ_CLOCK:
3466 hci_cc_read_clock(hdev, skb);
3467 break;
3468
3469 case HCI_OP_READ_INQ_RSP_TX_POWER:
3470 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3471 break;
3472
3473 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3474 hci_cc_read_def_err_data_reporting(hdev, skb);
3475 break;
3476
3477 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3478 hci_cc_write_def_err_data_reporting(hdev, skb);
3479 break;
3480
3481 case HCI_OP_PIN_CODE_REPLY:
3482 hci_cc_pin_code_reply(hdev, skb);
3483 break;
3484
3485 case HCI_OP_PIN_CODE_NEG_REPLY:
3486 hci_cc_pin_code_neg_reply(hdev, skb);
3487 break;
3488
3489 case HCI_OP_READ_LOCAL_OOB_DATA:
3490 hci_cc_read_local_oob_data(hdev, skb);
3491 break;
3492
3493 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3494 hci_cc_read_local_oob_ext_data(hdev, skb);
3495 break;
3496
3497 case HCI_OP_LE_READ_BUFFER_SIZE:
3498 hci_cc_le_read_buffer_size(hdev, skb);
3499 break;
3500
3501 case HCI_OP_LE_READ_LOCAL_FEATURES:
3502 hci_cc_le_read_local_features(hdev, skb);
3503 break;
3504
3505 case HCI_OP_LE_READ_ADV_TX_POWER:
3506 hci_cc_le_read_adv_tx_power(hdev, skb);
3507 break;
3508
3509 case HCI_OP_USER_CONFIRM_REPLY:
3510 hci_cc_user_confirm_reply(hdev, skb);
3511 break;
3512
3513 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3514 hci_cc_user_confirm_neg_reply(hdev, skb);
3515 break;
3516
3517 case HCI_OP_USER_PASSKEY_REPLY:
3518 hci_cc_user_passkey_reply(hdev, skb);
3519 break;
3520
3521 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3522 hci_cc_user_passkey_neg_reply(hdev, skb);
3523 break;
3524
3525 case HCI_OP_LE_SET_RANDOM_ADDR:
3526 hci_cc_le_set_random_addr(hdev, skb);
3527 break;
3528
3529 case HCI_OP_LE_SET_ADV_ENABLE:
3530 hci_cc_le_set_adv_enable(hdev, skb);
3531 break;
3532
3533 case HCI_OP_LE_SET_SCAN_PARAM:
3534 hci_cc_le_set_scan_param(hdev, skb);
3535 break;
3536
3537 case HCI_OP_LE_SET_SCAN_ENABLE:
3538 hci_cc_le_set_scan_enable(hdev, skb);
3539 break;
3540
3541 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3542 hci_cc_le_read_accept_list_size(hdev, skb);
3543 break;
3544
3545 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3546 hci_cc_le_clear_accept_list(hdev, skb);
3547 break;
3548
3549 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3550 hci_cc_le_add_to_accept_list(hdev, skb);
3551 break;
3552
3553 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3554 hci_cc_le_del_from_accept_list(hdev, skb);
3555 break;
3556
3557 case HCI_OP_LE_READ_SUPPORTED_STATES:
3558 hci_cc_le_read_supported_states(hdev, skb);
3559 break;
3560
3561 case HCI_OP_LE_READ_DEF_DATA_LEN:
3562 hci_cc_le_read_def_data_len(hdev, skb);
3563 break;
3564
3565 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3566 hci_cc_le_write_def_data_len(hdev, skb);
3567 break;
3568
3569 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3570 hci_cc_le_add_to_resolv_list(hdev, skb);
3571 break;
3572
3573 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3574 hci_cc_le_del_from_resolv_list(hdev, skb);
3575 break;
3576
3577 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3578 hci_cc_le_clear_resolv_list(hdev, skb);
3579 break;
3580
3581 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3582 hci_cc_le_read_resolv_list_size(hdev, skb);
3583 break;
3584
3585 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3586 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3587 break;
3588
3589 case HCI_OP_LE_READ_MAX_DATA_LEN:
3590 hci_cc_le_read_max_data_len(hdev, skb);
3591 break;
3592
3593 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3594 hci_cc_write_le_host_supported(hdev, skb);
3595 break;
3596
3597 case HCI_OP_LE_SET_ADV_PARAM:
3598 hci_cc_set_adv_param(hdev, skb);
3599 break;
3600
3601 case HCI_OP_READ_RSSI:
3602 hci_cc_read_rssi(hdev, skb);
3603 break;
3604
3605 case HCI_OP_READ_TX_POWER:
3606 hci_cc_read_tx_power(hdev, skb);
3607 break;
3608
3609 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3610 hci_cc_write_ssp_debug_mode(hdev, skb);
3611 break;
3612
3613 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3614 hci_cc_le_set_ext_scan_param(hdev, skb);
3615 break;
3616
3617 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3618 hci_cc_le_set_ext_scan_enable(hdev, skb);
3619 break;
3620
3621 case HCI_OP_LE_SET_DEFAULT_PHY:
3622 hci_cc_le_set_default_phy(hdev, skb);
3623 break;
3624
3625 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3626 hci_cc_le_read_num_adv_sets(hdev, skb);
3627 break;
3628
3629 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3630 hci_cc_set_ext_adv_param(hdev, skb);
3631 break;
3632
3633 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3634 hci_cc_le_set_ext_adv_enable(hdev, skb);
3635 break;
3636
3637 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3638 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3639 break;
3640
3641 case HCI_OP_LE_READ_TRANSMIT_POWER:
3642 hci_cc_le_read_transmit_power(hdev, skb);
3643 break;
3644
3645 default:
3646 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3647 break;
3648 }
3649
3650 handle_cmd_cnt_and_timer(hdev, *opcode, ev->ncmd);
3651
3652 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3653 req_complete_skb);
3654
3655 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3656 bt_dev_err(hdev,
3657 "unexpected event for opcode 0x%4.4x", *opcode);
3658 return;
3659 }
3660
3661 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3662 queue_work(hdev->workqueue, &hdev->cmd_work);
3663}
3664
3665static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3666 u16 *opcode, u8 *status,
3667 hci_req_complete_t *req_complete,
3668 hci_req_complete_skb_t *req_complete_skb)
3669{
3670 struct hci_ev_cmd_status *ev = (void *) skb->data;
3671
3672 skb_pull(skb, sizeof(*ev));
3673
3674 *opcode = __le16_to_cpu(ev->opcode);
3675 *status = ev->status;
3676
3677 switch (*opcode) {
3678 case HCI_OP_INQUIRY:
3679 hci_cs_inquiry(hdev, ev->status);
3680 break;
3681
3682 case HCI_OP_CREATE_CONN:
3683 hci_cs_create_conn(hdev, ev->status);
3684 break;
3685
3686 case HCI_OP_DISCONNECT:
3687 hci_cs_disconnect(hdev, ev->status);
3688 break;
3689
3690 case HCI_OP_ADD_SCO:
3691 hci_cs_add_sco(hdev, ev->status);
3692 break;
3693
3694 case HCI_OP_AUTH_REQUESTED:
3695 hci_cs_auth_requested(hdev, ev->status);
3696 break;
3697
3698 case HCI_OP_SET_CONN_ENCRYPT:
3699 hci_cs_set_conn_encrypt(hdev, ev->status);
3700 break;
3701
3702 case HCI_OP_REMOTE_NAME_REQ:
3703 hci_cs_remote_name_req(hdev, ev->status);
3704 break;
3705
3706 case HCI_OP_READ_REMOTE_FEATURES:
3707 hci_cs_read_remote_features(hdev, ev->status);
3708 break;
3709
3710 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3711 hci_cs_read_remote_ext_features(hdev, ev->status);
3712 break;
3713
3714 case HCI_OP_SETUP_SYNC_CONN:
3715 hci_cs_setup_sync_conn(hdev, ev->status);
3716 break;
3717
3718 case HCI_OP_SNIFF_MODE:
3719 hci_cs_sniff_mode(hdev, ev->status);
3720 break;
3721
3722 case HCI_OP_EXIT_SNIFF_MODE:
3723 hci_cs_exit_sniff_mode(hdev, ev->status);
3724 break;
3725
3726 case HCI_OP_SWITCH_ROLE:
3727 hci_cs_switch_role(hdev, ev->status);
3728 break;
3729
3730 case HCI_OP_LE_CREATE_CONN:
3731 hci_cs_le_create_conn(hdev, ev->status);
3732 break;
3733
3734 case HCI_OP_LE_READ_REMOTE_FEATURES:
3735 hci_cs_le_read_remote_features(hdev, ev->status);
3736 break;
3737
3738 case HCI_OP_LE_START_ENC:
3739 hci_cs_le_start_enc(hdev, ev->status);
3740 break;
3741
3742 case HCI_OP_LE_EXT_CREATE_CONN:
3743 hci_cs_le_ext_create_conn(hdev, ev->status);
3744 break;
3745
3746 default:
3747 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3748 break;
3749 }
3750
3751 handle_cmd_cnt_and_timer(hdev, *opcode, ev->ncmd);
3752
3753	/* Indicate request completion if the command failed.  Also, if
3754	 * we are not waiting for a special event and we get a success
3755	 * command status, try to flag the request as completed, since
3756	 * this kind of command will not generate a Command Complete
3757	 * event.
3758	 */
3759 if (ev->status ||
3760 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3761 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3762 req_complete_skb);
3763
3764 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3765 bt_dev_err(hdev,
3766 "unexpected event for opcode 0x%4.4x", *opcode);
3767 return;
3768 }
3769
3770 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3771 queue_work(hdev->workqueue, &hdev->cmd_work);
3772}
3773
3774static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3775{
3776 struct hci_ev_hardware_error *ev = (void *) skb->data;
3777
3778 hdev->hw_error_code = ev->code;
3779
3780 queue_work(hdev->req_workqueue, &hdev->error_reset);
3781}
3782
3783static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3784{
3785 struct hci_ev_role_change *ev = (void *) skb->data;
3786 struct hci_conn *conn;
3787
3788 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3789
3790 hci_dev_lock(hdev);
3791
3792 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3793 if (conn) {
3794 if (!ev->status)
3795 conn->role = ev->role;
3796
3797 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3798
3799 hci_role_switch_cfm(conn, ev->status, ev->role);
3800 }
3801
3802 hci_dev_unlock(hdev);
3803}
3804
3805static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3806{
3807 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3808 int i;
3809
3810 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3811 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3812 return;
3813 }
3814
3815 if (skb->len < sizeof(*ev) ||
3816 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3817 BT_DBG("%s bad parameters", hdev->name);
3818 return;
3819 }
3820
3821 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3822
3823 for (i = 0; i < ev->num_hndl; i++) {
3824 struct hci_comp_pkts_info *info = &ev->handles[i];
3825 struct hci_conn *conn;
3826 __u16 handle, count;
3827
3828 handle = __le16_to_cpu(info->handle);
3829 count = __le16_to_cpu(info->count);
3830
3831 conn = hci_conn_hash_lookup_handle(hdev, handle);
3832 if (!conn)
3833 continue;
3834
3835 conn->sent -= count;
3836
3837 switch (conn->type) {
3838 case ACL_LINK:
3839 hdev->acl_cnt += count;
3840 if (hdev->acl_cnt > hdev->acl_pkts)
3841 hdev->acl_cnt = hdev->acl_pkts;
3842 break;
3843
3844 case LE_LINK:
3845 if (hdev->le_pkts) {
3846 hdev->le_cnt += count;
3847 if (hdev->le_cnt > hdev->le_pkts)
3848 hdev->le_cnt = hdev->le_pkts;
3849 } else {
3850 hdev->acl_cnt += count;
3851 if (hdev->acl_cnt > hdev->acl_pkts)
3852 hdev->acl_cnt = hdev->acl_pkts;
3853 }
3854 break;
3855
3856 case SCO_LINK:
3857 hdev->sco_cnt += count;
3858 if (hdev->sco_cnt > hdev->sco_pkts)
3859 hdev->sco_cnt = hdev->sco_pkts;
3860 break;
3861
3862 default:
3863 bt_dev_err(hdev, "unknown type %d conn %p",
3864 conn->type, conn);
3865 break;
3866 }
3867 }
3868
3869 queue_work(hdev->workqueue, &hdev->tx_work);
3870}
3871
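/* Resolve a handle to a connection.  On AMP controllers the handle
 * identifies a channel, so go through the channel to its connection.
 */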
3872static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3873 __u16 handle)
3874{
3875 struct hci_chan *chan;
3876
3877 switch (hdev->dev_type) {
3878 case HCI_PRIMARY:
3879 return hci_conn_hash_lookup_handle(hdev, handle);
3880 case HCI_AMP:
3881 chan = hci_chan_lookup_handle(hdev, handle);
3882 if (chan)
3883 return chan->conn;
3884 break;
3885 default:
3886 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3887 break;
3888 }
3889
3890 return NULL;
3891}
3892
3893static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3894{
3895 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3896 int i;
3897
3898 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3899 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3900 return;
3901 }
3902
3903 if (skb->len < sizeof(*ev) ||
3904 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3905 BT_DBG("%s bad parameters", hdev->name);
3906 return;
3907 }
3908
3909 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3910 ev->num_hndl);
3911
3912 for (i = 0; i < ev->num_hndl; i++) {
3913 struct hci_comp_blocks_info *info = &ev->handles[i];
3914 struct hci_conn *conn = NULL;
3915 __u16 handle, block_count;
3916
3917 handle = __le16_to_cpu(info->handle);
3918 block_count = __le16_to_cpu(info->blocks);
3919
3920 conn = __hci_conn_lookup_handle(hdev, handle);
3921 if (!conn)
3922 continue;
3923
3924 conn->sent -= block_count;
3925
3926 switch (conn->type) {
3927 case ACL_LINK:
3928 case AMP_LINK:
3929 hdev->block_cnt += block_count;
3930 if (hdev->block_cnt > hdev->num_blocks)
3931 hdev->block_cnt = hdev->num_blocks;
3932 break;
3933
3934 default:
3935 bt_dev_err(hdev, "unknown type %d conn %p",
3936 conn->type, conn);
3937 break;
3938 }
3939 }
3940
3941 queue_work(hdev->workqueue, &hdev->tx_work);
3942}
3943
3944static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3945{
3946 struct hci_ev_mode_change *ev = (void *) skb->data;
3947 struct hci_conn *conn;
3948
3949 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3950
3951 hci_dev_lock(hdev);
3952
3953 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3954 if (conn) {
3955 conn->mode = ev->mode;
3956
3957 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3958 &conn->flags)) {
3959 if (conn->mode == HCI_CM_ACTIVE)
3960 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3961 else
3962 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3963 }
3964
3965 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3966 hci_sco_setup(conn, ev->status);
3967 }
3968
3969 hci_dev_unlock(hdev);
3970}
3971
3972static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3973{
3974 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3975 struct hci_conn *conn;
3976
3977 BT_DBG("%s", hdev->name);
3978
3979 hci_dev_lock(hdev);
3980
3981 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3982 if (!conn)
3983 goto unlock;
3984
3985 if (conn->state == BT_CONNECTED) {
3986 hci_conn_hold(conn);
3987 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3988 hci_conn_drop(conn);
3989 }
3990
3991 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3992 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3993 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3994 sizeof(ev->bdaddr), &ev->bdaddr);
3995 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3996 u8 secure;
3997
3998 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3999 secure = 1;
4000 else
4001 secure = 0;
4002
4003 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4004 }
4005
4006unlock:
4007 hci_dev_unlock(hdev);
4008}
4009
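/* Store the key type and PIN length on the connection and derive the
 * pending security level from the type of link key in use.
 */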
4010static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4011{
4012 if (key_type == HCI_LK_CHANGED_COMBINATION)
4013 return;
4014
4015 conn->pin_length = pin_len;
4016 conn->key_type = key_type;
4017
4018 switch (key_type) {
4019 case HCI_LK_LOCAL_UNIT:
4020 case HCI_LK_REMOTE_UNIT:
4021 case HCI_LK_DEBUG_COMBINATION:
4022 return;
4023 case HCI_LK_COMBINATION:
4024 if (pin_len == 16)
4025 conn->pending_sec_level = BT_SECURITY_HIGH;
4026 else
4027 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4028 break;
4029 case HCI_LK_UNAUTH_COMBINATION_P192:
4030 case HCI_LK_UNAUTH_COMBINATION_P256:
4031 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4032 break;
4033 case HCI_LK_AUTH_COMBINATION_P192:
4034 conn->pending_sec_level = BT_SECURITY_HIGH;
4035 break;
4036 case HCI_LK_AUTH_COMBINATION_P256:
4037 conn->pending_sec_level = BT_SECURITY_FIPS;
4038 break;
4039 }
4040}
4041
4042static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4043{
4044 struct hci_ev_link_key_req *ev = (void *) skb->data;
4045 struct hci_cp_link_key_reply cp;
4046 struct hci_conn *conn;
4047 struct link_key *key;
4048
4049 BT_DBG("%s", hdev->name);
4050
4051 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4052 return;
4053
4054 hci_dev_lock(hdev);
4055
4056 key = hci_find_link_key(hdev, &ev->bdaddr);
4057 if (!key) {
4058 BT_DBG("%s link key not found for %pMR", hdev->name,
4059 &ev->bdaddr);
4060 goto not_found;
4061 }
4062
4063 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4064 &ev->bdaddr);
4065
4066 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4067 if (conn) {
4068 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4069
4070 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4071 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4072 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4073 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4074 goto not_found;
4075 }
4076
4077 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4078 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4079 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4080 BT_DBG("%s ignoring key unauthenticated for high security",
4081 hdev->name);
4082 goto not_found;
4083 }
4084
4085 conn_set_key(conn, key->type, key->pin_len);
4086 }
4087
4088 bacpy(&cp.bdaddr, &ev->bdaddr);
4089 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4090
4091 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4092
4093 hci_dev_unlock(hdev);
4094
4095 return;
4096
4097not_found:
4098 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4099 hci_dev_unlock(hdev);
4100}
4101
4102static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4103{
4104 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4105 struct hci_conn *conn;
4106 struct link_key *key;
4107 bool persistent;
4108 u8 pin_len = 0;
4109
4110 BT_DBG("%s", hdev->name);
4111
4112 hci_dev_lock(hdev);
4113
4114 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4115 if (!conn)
4116 goto unlock;
4117
4118 hci_conn_hold(conn);
4119 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4120 hci_conn_drop(conn);
4121
4122 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4123 conn_set_key(conn, ev->key_type, conn->pin_length);
4124
4125 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4126 goto unlock;
4127
4128 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4129 ev->key_type, pin_len, &persistent);
4130 if (!key)
4131 goto unlock;
4132
4133	/* Refresh the connection's key information, since adding the
4134	 * key may have fixed up the type for changed combination keys.
4135	 */
4136 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4137 conn_set_key(conn, key->type, key->pin_len);
4138
4139 mgmt_new_link_key(hdev, key, persistent);
4140
4141	/* Debug keys are only kept around when HCI_KEEP_DEBUG_KEYS is
4142	 * set.  Otherwise remove the key again right away; user space
4143	 * has already been notified about it, but with persistent set
4144	 * to false.
4145	 */
4146 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4147 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4148 list_del_rcu(&key->list);
4149 kfree_rcu(key, rcu);
4150 goto unlock;
4151 }
4152
4153 if (persistent)
4154 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4155 else
4156 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4157
4158unlock:
4159 hci_dev_unlock(hdev);
4160}
4161
4162static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4163{
4164 struct hci_ev_clock_offset *ev = (void *) skb->data;
4165 struct hci_conn *conn;
4166
4167 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4168
4169 hci_dev_lock(hdev);
4170
4171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4172 if (conn && !ev->status) {
4173 struct inquiry_entry *ie;
4174
4175 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4176 if (ie) {
4177 ie->data.clock_offset = ev->clock_offset;
4178 ie->timestamp = jiffies;
4179 }
4180 }
4181
4182 hci_dev_unlock(hdev);
4183}
4184
4185static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4186{
4187 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4188 struct hci_conn *conn;
4189
4190 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4191
4192 hci_dev_lock(hdev);
4193
4194 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4195 if (conn && !ev->status)
4196 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4197
4198 hci_dev_unlock(hdev);
4199}
4200
4201static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4202{
4203 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4204 struct inquiry_entry *ie;
4205
4206 BT_DBG("%s", hdev->name);
4207
4208 hci_dev_lock(hdev);
4209
4210 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4211 if (ie) {
4212 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4213 ie->timestamp = jiffies;
4214 }
4215
4216 hci_dev_unlock(hdev);
4217}
4218
4219static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4220 struct sk_buff *skb)
4221{
4222 struct inquiry_data data;
4223 int num_rsp = *((__u8 *) skb->data);
4224
4225 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4226
4227 if (!num_rsp)
4228 return;
4229
4230 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4231 return;
4232
4233 hci_dev_lock(hdev);
4234
4235 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4236 struct inquiry_info_with_rssi_and_pscan_mode *info;
4237 info = (void *) (skb->data + 1);
4238
4239 if (skb->len < num_rsp * sizeof(*info) + 1)
4240 goto unlock;
4241
4242 for (; num_rsp; num_rsp--, info++) {
4243 u32 flags;
4244
4245 bacpy(&data.bdaddr, &info->bdaddr);
4246 data.pscan_rep_mode = info->pscan_rep_mode;
4247 data.pscan_period_mode = info->pscan_period_mode;
4248 data.pscan_mode = info->pscan_mode;
4249 memcpy(data.dev_class, info->dev_class, 3);
4250 data.clock_offset = info->clock_offset;
4251 data.rssi = info->rssi;
4252 data.ssp_mode = 0x00;
4253
4254 flags = hci_inquiry_cache_update(hdev, &data, false);
4255
4256 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4257 info->dev_class, info->rssi,
4258 flags, NULL, 0, NULL, 0);
4259 }
4260 } else {
4261 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4262
4263 if (skb->len < num_rsp * sizeof(*info) + 1)
4264 goto unlock;
4265
4266 for (; num_rsp; num_rsp--, info++) {
4267 u32 flags;
4268
4269 bacpy(&data.bdaddr, &info->bdaddr);
4270 data.pscan_rep_mode = info->pscan_rep_mode;
4271 data.pscan_period_mode = info->pscan_period_mode;
4272 data.pscan_mode = 0x00;
4273 memcpy(data.dev_class, info->dev_class, 3);
4274 data.clock_offset = info->clock_offset;
4275 data.rssi = info->rssi;
4276 data.ssp_mode = 0x00;
4277
4278 flags = hci_inquiry_cache_update(hdev, &data, false);
4279
4280 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4281 info->dev_class, info->rssi,
4282 flags, NULL, 0, NULL, 0);
4283 }
4284 }
4285
4286unlock:
4287 hci_dev_unlock(hdev);
4288}
4289
4290static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4291 struct sk_buff *skb)
4292{
4293 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4294 struct hci_conn *conn;
4295
4296 BT_DBG("%s", hdev->name);
4297
4298 hci_dev_lock(hdev);
4299
4300 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4301 if (!conn)
4302 goto unlock;
4303
4304 if (ev->page < HCI_MAX_PAGES)
4305 memcpy(conn->features[ev->page], ev->features, 8);
4306
4307 if (!ev->status && ev->page == 0x01) {
4308 struct inquiry_entry *ie;
4309
4310 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4311 if (ie)
4312 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4313
4314 if (ev->features[0] & LMP_HOST_SSP) {
4315 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4316 } else {
4317			/* The remote host no longer claims SSP support in
4318			 * its extended features, so clear the flag and fall
4319			 * back to legacy pairing with this device.
4320			 */
4321
4322
4323
4324
4325 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4326 }
4327
4328 if (ev->features[0] & LMP_HOST_SC)
4329 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4330 }
4331
4332 if (conn->state != BT_CONFIG)
4333 goto unlock;
4334
4335 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4336 struct hci_cp_remote_name_req cp;
4337 memset(&cp, 0, sizeof(cp));
4338 bacpy(&cp.bdaddr, &conn->dst);
4339 cp.pscan_rep_mode = 0x02;
4340 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4341 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4342 mgmt_device_connected(hdev, conn, NULL, 0);
4343
4344 if (!hci_outgoing_auth_needed(hdev, conn)) {
4345 conn->state = BT_CONNECTED;
4346 hci_connect_cfm(conn, ev->status);
4347 hci_conn_drop(conn);
4348 }
4349
4350unlock:
4351 hci_dev_unlock(hdev);
4352}
4353
4354static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4355 struct sk_buff *skb)
4356{
4357 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4358 struct hci_conn *conn;
4359
4360 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4361
4362 hci_dev_lock(hdev);
4363
4364 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4365 if (!conn) {
4366 if (ev->link_type == ESCO_LINK)
4367 goto unlock;
4368
4369		/* The event reports a SCO link, but the connection may
4370		 * have been set up as eSCO and the controller fell back
4371		 * to SCO.  Look up the eSCO object and reuse it in that
4372		 * case.
4373		 */
4374
4375
4376
4377
4378 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4379 if (!conn)
4380 goto unlock;
4381 }
4382
4383 switch (ev->status) {
4384 case 0x00:
4385 conn->handle = __le16_to_cpu(ev->handle);
4386 conn->state = BT_CONNECTED;
4387 conn->type = ev->link_type;
4388
4389 hci_debugfs_create_conn(conn);
4390 hci_conn_add_sysfs(conn);
4391 break;
4392
4393 case 0x10:
4394 case 0x0d:
4395 case 0x11:
4396 case 0x1c:
4397 case 0x1a:
4398 case 0x1e:
4399 case 0x1f:
4400 case 0x20:
4401 if (conn->out) {
4402 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4403 (hdev->esco_type & EDR_ESCO_MASK);
4404 if (hci_setup_sync(conn, conn->link->handle))
4405 goto unlock;
4406 }
4407 fallthrough;
4408
4409 default:
4410 conn->state = BT_CLOSED;
4411 break;
4412 }
4413
4414 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4415
4416 switch (ev->air_mode) {
4417 case 0x02:
4418 if (hdev->notify)
4419 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4420 break;
4421 case 0x03:
4422 if (hdev->notify)
4423 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4424 break;
4425 }
4426
4427 hci_connect_cfm(conn, ev->status);
4428 if (ev->status)
4429 hci_conn_del(conn);
4430
4431unlock:
4432 hci_dev_unlock(hdev);
4433}
4434
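/* Return the number of significant EIR bytes, i.e. everything up to the
 * first zero-length field.
 */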
4435static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4436{
4437 size_t parsed = 0;
4438
4439 while (parsed < eir_len) {
4440 u8 field_len = eir[0];
4441
4442 if (field_len == 0)
4443 return parsed;
4444
4445 parsed += field_len + 1;
4446 eir += field_len + 1;
4447 }
4448
4449 return eir_len;
4450}
4451
4452static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4453 struct sk_buff *skb)
4454{
4455 struct inquiry_data data;
4456 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4457 int num_rsp = *((__u8 *) skb->data);
4458 size_t eir_len;
4459
4460 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4461
4462 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4463 return;
4464
4465 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4466 return;
4467
4468 hci_dev_lock(hdev);
4469
4470 for (; num_rsp; num_rsp--, info++) {
4471 u32 flags;
4472 bool name_known;
4473
4474 bacpy(&data.bdaddr, &info->bdaddr);
4475 data.pscan_rep_mode = info->pscan_rep_mode;
4476 data.pscan_period_mode = info->pscan_period_mode;
4477 data.pscan_mode = 0x00;
4478 memcpy(data.dev_class, info->dev_class, 3);
4479 data.clock_offset = info->clock_offset;
4480 data.rssi = info->rssi;
4481 data.ssp_mode = 0x01;
4482
4483 if (hci_dev_test_flag(hdev, HCI_MGMT))
4484 name_known = eir_get_data(info->data,
4485 sizeof(info->data),
4486 EIR_NAME_COMPLETE, NULL);
4487 else
4488 name_known = true;
4489
4490 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4491
4492 eir_len = eir_get_length(info->data, sizeof(info->data));
4493
4494 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4495 info->dev_class, info->rssi,
4496 flags, info->data, eir_len, NULL, 0);
4497 }
4498
4499 hci_dev_unlock(hdev);
4500}
4501
4502static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4503 struct sk_buff *skb)
4504{
4505 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4506 struct hci_conn *conn;
4507
4508 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4509 __le16_to_cpu(ev->handle));
4510
4511 hci_dev_lock(hdev);
4512
4513 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4514 if (!conn)
4515 goto unlock;
4516
4517	/* Only LE links are handled here; for BR/EDR the equivalent
4518	 * steps are taken on the Authentication Complete event.
4519	 */
4520 if (conn->type != LE_LINK)
4521 goto unlock;
4522
4523 if (!ev->status)
4524 conn->sec_level = conn->pending_sec_level;
4525
4526 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4527
4528 if (ev->status && conn->state == BT_CONNECTED) {
4529 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4530 hci_conn_drop(conn);
4531 goto unlock;
4532 }
4533
4534 if (conn->state == BT_CONFIG) {
4535 if (!ev->status)
4536 conn->state = BT_CONNECTED;
4537
4538 hci_connect_cfm(conn, ev->status);
4539 hci_conn_drop(conn);
4540 } else {
4541 hci_auth_cfm(conn, ev->status);
4542
4543 hci_conn_hold(conn);
4544 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4545 hci_conn_drop(conn);
4546 }
4547
4548unlock:
4549 hci_dev_unlock(hdev);
4550}
4551
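/* Work out the authentication requirements to use in the IO Capability
 * Reply, combining the local requirements with what the remote side
 * requested.
 */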
4552static u8 hci_get_auth_req(struct hci_conn *conn)
4553{
4554	/* If the remote side requests no bonding, follow that lead */
4555 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4556 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4557 return conn->remote_auth | (conn->auth_type & 0x01);
4558
4559	/* If both sides have usable IO capabilities, request MITM
4560	 * protection.
4561	 */
4562 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4563 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4564 return conn->remote_auth | 0x01;
4565
4566	/* No MITM protection is possible, so ignore the remote requirement */
4567 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4568}
4569
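/* Determine the OOB Data Present value to use in the IO Capability
 * Reply, based on which remote OOB values (P-192 and/or P-256) are
 * stored for the peer and whether Secure Connections is in use.
 */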
4570static u8 bredr_oob_data_present(struct hci_conn *conn)
4571{
4572 struct hci_dev *hdev = conn->hdev;
4573 struct oob_data *data;
4574
4575 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4576 if (!data)
4577 return 0x00;
4578
4579 if (bredr_sc_enabled(hdev)) {
4580 /* When Secure Connections is enabled, then just
4581  * return the present value stored with the OOB
4582  * data. The stored value contains the right present
4583  * information. However it can only be trusted when
4584  * not in Secure Connections Only mode.
4585  */
4586 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4587 return data->present;
4588
4589 /* When Secure Connections Only mode is enabled, then
4590  * the P-256 values are required. If they are not
4591  * available, then do not declare that OOB data is
4592  * present.
4593  */
4594 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4595 !memcmp(data->hash256, ZERO_KEY, 16))
4596 return 0x00;
4597
4598 return 0x02;
4599 }
4600
4601 /* When Secure Connections is not enabled or actually
4602  * not supported by the hardware, then check if the
4603  * P-192 data values are present.
4604  */
4605 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4606 !memcmp(data->hash192, ZERO_KEY, 16))
4607 return 0x00;
4608
4609 return 0x01;
4610}
4611
4612static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4613{
4614 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4615 struct hci_conn *conn;
4616
4617 BT_DBG("%s", hdev->name);
4618
4619 hci_dev_lock(hdev);
4620
4621 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4622 if (!conn)
4623 goto unlock;
4624
4625 hci_conn_hold(conn);
4626
4627 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4628 goto unlock;
4629
4630 /* Allow pairing if we're pairable, the initiators of the
4631  * pairing or if the remote is not requesting bonding.
4632  */
4633 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4634 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4635 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4636 struct hci_cp_io_capability_reply cp;
4637
4638 bacpy(&cp.bdaddr, &ev->bdaddr);
4639
4640
4641 cp.capability = (conn->io_capability == 0x04) ?
4642 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4643
4644 /* If we are initiators, there is no remote information yet */
4645 if (conn->remote_auth == 0xff) {
4646 /* Request MITM protection if our IO caps allow it
4647  * except for the no-bonding case.
4648  */
4649 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4650 conn->auth_type != HCI_AT_NO_BONDING)
4651 conn->auth_type |= 0x01;
4652 } else {
4653 conn->auth_type = hci_get_auth_req(conn);
4654 }
4655
4656 /* If we're not bondable, force one of the non-bondable
4657  * authentication requirement values.
4658  */
4659 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4660 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4661
4662 cp.authentication = conn->auth_type;
4663 cp.oob_data = bredr_oob_data_present(conn);
4664
4665 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4666 sizeof(cp), &cp);
4667 } else {
4668 struct hci_cp_io_capability_neg_reply cp;
4669
4670 bacpy(&cp.bdaddr, &ev->bdaddr);
4671 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4672
4673 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4674 sizeof(cp), &cp);
4675 }
4676
4677unlock:
4678 hci_dev_unlock(hdev);
4679}
4680
4681static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4682{
4683 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4684 struct hci_conn *conn;
4685
4686 BT_DBG("%s", hdev->name);
4687
4688 hci_dev_lock(hdev);
4689
4690 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4691 if (!conn)
4692 goto unlock;
4693
4694 conn->remote_cap = ev->capability;
4695 conn->remote_auth = ev->authentication;
4696
4697unlock:
4698 hci_dev_unlock(hdev);
4699}
4700
4701static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4702 struct sk_buff *skb)
4703{
4704 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4705 int loc_mitm, rem_mitm, confirm_hint = 0;
4706 struct hci_conn *conn;
4707
4708 BT_DBG("%s", hdev->name);
4709
4710 hci_dev_lock(hdev);
4711
4712 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4713 goto unlock;
4714
4715 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4716 if (!conn)
4717 goto unlock;
4718
4719 loc_mitm = (conn->auth_type & 0x01);
4720 rem_mitm = (conn->remote_auth & 0x01);
4721
4722 /* If we require MITM but the remote device can't provide that
4723  * (it has NoInputNoOutput) then reject the confirmation
4724  * request. We check the security level here since it doesn't
4725  * necessarily match conn->auth_type.
4726  */
4727 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4728 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4729 BT_DBG("Rejecting request: remote device can't provide MITM");
4730 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4731 sizeof(ev->bdaddr), &ev->bdaddr);
4732 goto unlock;
4733 }
4734
4735 /* If no side requires MITM protection; auto-accept */
4736 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4737 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4738 /* If we're not the initiator, request authorization to
4739  * proceed from user space (mgmt_user_confirm with
4740  * confirm_hint set to 1). The exception is if neither
4741  * side had MITM or if the local IO capability is
4742  * NoInputNoOutput, in which case we do auto-accept
4743  * further below.
4744  */
4745 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4746 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4747 (loc_mitm || rem_mitm)) {
4748 BT_DBG("Confirming auto-accept as acceptor");
4749 confirm_hint = 1;
4750 goto confirm;
4751 }
4752
4753 /* If there already exists a link key in the local host, leave
4754  * the decision to user space since the remote device could
4755  * be legitimate or malicious.
4756  */
4757 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4758 bt_dev_dbg(hdev, "Local host already has link key");
4759 confirm_hint = 1;
4760 goto confirm;
4761 }
4762
4763 BT_DBG("Auto-accept of user confirmation with %ums delay",
4764 hdev->auto_accept_delay);
4765
4766 if (hdev->auto_accept_delay > 0) {
4767 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4768 queue_delayed_work(conn->hdev->workqueue,
4769 &conn->auto_accept_work, delay);
4770 goto unlock;
4771 }
4772
4773 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4774 sizeof(ev->bdaddr), &ev->bdaddr);
4775 goto unlock;
4776 }
4777
4778confirm:
4779 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4780 le32_to_cpu(ev->passkey), confirm_hint);
4781
4782unlock:
4783 hci_dev_unlock(hdev);
4784}
4785
4786static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4787 struct sk_buff *skb)
4788{
4789 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4790
4791 BT_DBG("%s", hdev->name);
4792
4793 if (hci_dev_test_flag(hdev, HCI_MGMT))
4794 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4795}
4796
4797static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4798 struct sk_buff *skb)
4799{
4800 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4801 struct hci_conn *conn;
4802
4803 BT_DBG("%s", hdev->name);
4804
4805 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4806 if (!conn)
4807 return;
4808
4809 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4810 conn->passkey_entered = 0;
4811
4812 if (hci_dev_test_flag(hdev, HCI_MGMT))
4813 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4814 conn->dst_type, conn->passkey_notify,
4815 conn->passkey_entered);
4816}
4817
4818static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4819{
4820 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4821 struct hci_conn *conn;
4822
4823 BT_DBG("%s", hdev->name);
4824
4825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4826 if (!conn)
4827 return;
4828
4829 switch (ev->type) {
4830 case HCI_KEYPRESS_STARTED:
4831 conn->passkey_entered = 0;
4832 return;
4833
4834 case HCI_KEYPRESS_ENTERED:
4835 conn->passkey_entered++;
4836 break;
4837
4838 case HCI_KEYPRESS_ERASED:
4839 conn->passkey_entered--;
4840 break;
4841
4842 case HCI_KEYPRESS_CLEARED:
4843 conn->passkey_entered = 0;
4844 break;
4845
4846 case HCI_KEYPRESS_COMPLETED:
4847 return;
4848 }
4849
4850 if (hci_dev_test_flag(hdev, HCI_MGMT))
4851 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4852 conn->dst_type, conn->passkey_notify,
4853 conn->passkey_entered);
4854}
4855
4856static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4857 struct sk_buff *skb)
4858{
4859 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4860 struct hci_conn *conn;
4861
4862 BT_DBG("%s", hdev->name);
4863
4864 hci_dev_lock(hdev);
4865
4866 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4867 if (!conn)
4868 goto unlock;
4869
4870 /* Reset the authentication requirement to unknown */
4871 conn->remote_auth = 0xff;
4872
4873 /* If we initiated the authentication (HCI_CONN_AUTH_PEND is
4874  * set), the failure will be reported via the Auth Complete
4875  * event. Only signal mgmt here for remotely initiated pairing
4876  * to avoid duplicate auth_failed events towards user space.
4877  */
4878 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4879 mgmt_auth_failed(conn, ev->status);
4880
4881 hci_conn_drop(conn);
4882
4883unlock:
4884 hci_dev_unlock(hdev);
4885}
4886
4887static void hci_remote_host_features_evt(struct hci_dev *hdev,
4888 struct sk_buff *skb)
4889{
4890 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4891 struct inquiry_entry *ie;
4892 struct hci_conn *conn;
4893
4894 BT_DBG("%s", hdev->name);
4895
4896 hci_dev_lock(hdev);
4897
4898 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4899 if (conn)
4900 memcpy(conn->features[1], ev->features, 8);
4901
4902 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4903 if (ie)
4904 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4905
4906 hci_dev_unlock(hdev);
4907}
4908
4909static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4910 struct sk_buff *skb)
4911{
4912 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4913 struct oob_data *data;
4914
4915 BT_DBG("%s", hdev->name);
4916
4917 hci_dev_lock(hdev);
4918
4919 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4920 goto unlock;
4921
4922 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4923 if (!data) {
4924 struct hci_cp_remote_oob_data_neg_reply cp;
4925
4926 bacpy(&cp.bdaddr, &ev->bdaddr);
4927 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4928 sizeof(cp), &cp);
4929 goto unlock;
4930 }
4931
4932 if (bredr_sc_enabled(hdev)) {
4933 struct hci_cp_remote_oob_ext_data_reply cp;
4934
4935 bacpy(&cp.bdaddr, &ev->bdaddr);
4936 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4937 memset(cp.hash192, 0, sizeof(cp.hash192));
4938 memset(cp.rand192, 0, sizeof(cp.rand192));
4939 } else {
4940 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4941 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4942 }
4943 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4944 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4945
4946 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4947 sizeof(cp), &cp);
4948 } else {
4949 struct hci_cp_remote_oob_data_reply cp;
4950
4951 bacpy(&cp.bdaddr, &ev->bdaddr);
4952 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4953 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4954
4955 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4956 sizeof(cp), &cp);
4957 }
4958
4959unlock:
4960 hci_dev_unlock(hdev);
4961}
4962
4963#if IS_ENABLED(CONFIG_BT_HS)
4964static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4965{
4966 struct hci_ev_channel_selected *ev = (void *)skb->data;
4967 struct hci_conn *hcon;
4968
4969 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4970
4971 skb_pull(skb, sizeof(*ev));
4972
4973 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4974 if (!hcon)
4975 return;
4976
4977 amp_read_loc_assoc_final_data(hdev, hcon);
4978}
4979
4980static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4981 struct sk_buff *skb)
4982{
4983 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4984 struct hci_conn *hcon, *bredr_hcon;
4985
4986 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4987 ev->status);
4988
4989 hci_dev_lock(hdev);
4990
4991 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4992 if (!hcon)
4993 goto unlock;
4994
4995 if (!hcon->amp_mgr)
4996 goto unlock;
4997
4998 if (ev->status) {
4999 hci_conn_del(hcon);
5000 goto unlock;
5001 }
5002
5003 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5004
5005 hcon->state = BT_CONNECTED;
5006 bacpy(&hcon->dst, &bredr_hcon->dst);
5007
5008 hci_conn_hold(hcon);
5009 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5010 hci_conn_drop(hcon);
5011
5012 hci_debugfs_create_conn(hcon);
5013 hci_conn_add_sysfs(hcon);
5014
5015 amp_physical_cfm(bredr_hcon, hcon);
5016
5017unlock:
5018 hci_dev_unlock(hdev);
5019}
5020
5021static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5022{
5023 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5024 struct hci_conn *hcon;
5025 struct hci_chan *hchan;
5026 struct amp_mgr *mgr;
5027
5028 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5029 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5030 ev->status);
5031
5032 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5033 if (!hcon)
5034 return;
5035
5036 /* Create AMP hchan */
5037 hchan = hci_chan_create(hcon);
5038 if (!hchan)
5039 return;
5040
5041 hchan->handle = le16_to_cpu(ev->handle);
5042 hchan->amp = true;
5043
5044 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5045
5046 mgr = hcon->amp_mgr;
5047 if (mgr && mgr->bredr_chan) {
5048 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5049
5050 l2cap_chan_lock(bredr_chan);
5051
5052 bredr_chan->conn->mtu = hdev->block_mtu;
5053 l2cap_logical_cfm(bredr_chan, hchan, 0);
5054 hci_conn_hold(hcon);
5055
5056 l2cap_chan_unlock(bredr_chan);
5057 }
5058}
5059
5060static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5061 struct sk_buff *skb)
5062{
5063 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5064 struct hci_chan *hchan;
5065
5066 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5067 le16_to_cpu(ev->handle), ev->status);
5068
5069 if (ev->status)
5070 return;
5071
5072 hci_dev_lock(hdev);
5073
5074 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5075 if (!hchan || !hchan->amp)
5076 goto unlock;
5077
5078 amp_destroy_logical_link(hchan, ev->reason);
5079
5080unlock:
5081 hci_dev_unlock(hdev);
5082}
5083
5084static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5085 struct sk_buff *skb)
5086{
5087 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5088 struct hci_conn *hcon;
5089
5090 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5091
5092 if (ev->status)
5093 return;
5094
5095 hci_dev_lock(hdev);
5096
5097 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5098 if (hcon) {
5099 hcon->state = BT_CLOSED;
5100 hci_conn_del(hcon);
5101 }
5102
5103 hci_dev_unlock(hdev);
5104}
5105#endif
5106
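/* Common handling for both the legacy LE Connection Complete and the
 * LE Enhanced Connection Complete events.
 */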
5107static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5108 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
5109 u16 interval, u16 latency, u16 supervision_timeout)
5110{
5111 struct hci_conn_params *params;
5112 struct hci_conn *conn;
5113 struct smp_irk *irk;
5114 u8 addr_type;
5115
5116 hci_dev_lock(hdev);
5117
5118 /* All controllers implicitly stop advertising in the event of a
5119  * connection, so ensure that the state bit is cleared.
5120  */
5121 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5122
5123 conn = hci_lookup_le_connect(hdev);
5124 if (!conn) {
5125 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5126 if (!conn) {
5127 bt_dev_err(hdev, "no memory for new connection");
5128 goto unlock;
5129 }
5130
5131 conn->dst_type = bdaddr_type;
5132
5133 /* If we are the initiator of the connection, record the
5134  * responder (peer) address as well as the initiator address
5135  * used for the connection attempt.
5136  *
5137  * With privacy enabled the initiator address is the local
5138  * RPA, otherwise it is the controller's identity address.
5139  * SMP needs both of these addresses later on during pairing.
5140  */
5141 if (conn->out) {
5142 conn->resp_addr_type = bdaddr_type;
5143 bacpy(&conn->resp_addr, bdaddr);
5144 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5145 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5146 bacpy(&conn->init_addr, &hdev->rpa);
5147 } else {
5148 hci_copy_identity_address(hdev,
5149 &conn->init_addr,
5150 &conn->init_addr_type);
5151 }
5152 }
5153 } else {
5154 cancel_delayed_work(&conn->le_conn_timeout);
5155 }
5156
5157 if (!conn->out) {
5158 /* For incoming connections we are the advertiser, so the
5159  * responder address is our own advertising address.
5160  */
5161 conn->resp_addr_type = hdev->adv_addr_type;
5162 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5163 /* With extended advertising, resp_addr is updated later
5164  * from the Advertising Set Terminated event instead.
5165  */
5166 if (!ext_adv_capable(hdev))
5167 bacpy(&conn->resp_addr, &hdev->random_addr);
5168 } else {
5169 bacpy(&conn->resp_addr, &hdev->bdaddr);
5170 }
5171
5172 conn->init_addr_type = bdaddr_type;
5173 bacpy(&conn->init_addr, bdaddr);
5174
5175 /* For incoming connections, set the default minimum
5176  * and maximum connection interval. They will be used
5177  * to check if the parameters are in range and if not
5178  * trigger the connection update procedure.
5179  */
5180 conn->le_conn_min_interval = hdev->le_conn_min_interval;
5181 conn->le_conn_max_interval = hdev->le_conn_max_interval;
5182 }
5183
5184 /* Lookup the identity address from the stored connection
5185  * address and address type.
5186  *
5187  * When establishing connections to an identity address, the
5188  * connection procedure will store the resolvable random
5189  * address first. Now if it can be converted back into the
5190  * identity address, start using the identity address from
5191  * now on.
5192  */
5193 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5194 if (irk) {
5195 bacpy(&conn->dst, &irk->bdaddr);
5196 conn->dst_type = irk->addr_type;
5197 }
5198
5199 /* When the controller performs address resolution it reports
5200  * resolved address types. Convert them back to the regular
5201  * public/random types used by the rest of the stack.
5202  */
5203 if (use_ll_privacy(hdev) &&
5204 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5205 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5206 switch (conn->dst_type) {
5207 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5208 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5209 break;
5210 case ADDR_LE_DEV_RANDOM_RESOLVED:
5211 conn->dst_type = ADDR_LE_DEV_RANDOM;
5212 break;
5213 }
5214 }
5215
5216 if (status) {
5217 hci_le_conn_failed(conn, status);
5218 goto unlock;
5219 }
5220
5221 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5222 addr_type = BDADDR_LE_PUBLIC;
5223 else
5224 addr_type = BDADDR_LE_RANDOM;
5225
5226 /* Drop the connection if the device is blocked */
5227 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5228 hci_conn_drop(conn);
5229 goto unlock;
5230 }
5231
5232 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5233 mgmt_device_connected(hdev, conn, NULL, 0);
5234
5235 conn->sec_level = BT_SECURITY_LOW;
5236 conn->handle = handle;
5237 conn->state = BT_CONFIG;
5238
5239 conn->le_conn_interval = interval;
5240 conn->le_conn_latency = latency;
5241 conn->le_supv_timeout = supervision_timeout;
5242
5243 hci_debugfs_create_conn(conn);
5244 hci_conn_add_sysfs(conn);
5245
5246 /* The remote features procedure is defined for the central
5247  * role only, so only request the remote features for
5248  * connections that we initiated.
5249  *
5250  * If the local controller supports peripheral-initiated
5251  * features exchange, then requesting the remote features
5252  * in peripheral role is possible too. Otherwise just
5253  * transition into the connected state without them.
5254  */
5255 if (conn->out ||
5256 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5257 struct hci_cp_le_read_remote_features cp;
5258
5259 cp.handle = __cpu_to_le16(conn->handle);
5260
5261 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5262 sizeof(cp), &cp);
5263
5264 hci_conn_hold(conn);
5265 } else {
5266 conn->state = BT_CONNECTED;
5267 hci_connect_cfm(conn, status);
5268 }
5269
5270 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5271 conn->dst_type);
5272 if (params) {
5273 list_del_init(&params->action);
5274 if (params->conn) {
5275 hci_conn_drop(params->conn);
5276 hci_conn_put(params->conn);
5277 params->conn = NULL;
5278 }
5279 }
5280
5281unlock:
5282 hci_update_background_scan(hdev);
5283 hci_dev_unlock(hdev);
5284}
5285
5286static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5287{
5288 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5289
5290 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5291
5292 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5293 ev->role, le16_to_cpu(ev->handle),
5294 le16_to_cpu(ev->interval),
5295 le16_to_cpu(ev->latency),
5296 le16_to_cpu(ev->supervision_timeout));
5297}
5298
5299static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5300 struct sk_buff *skb)
5301{
5302 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5303
5304 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5305
5306 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5307 ev->role, le16_to_cpu(ev->handle),
5308 le16_to_cpu(ev->interval),
5309 le16_to_cpu(ev->latency),
5310 le16_to_cpu(ev->supervision_timeout));
5311
5312 if (use_ll_privacy(hdev) &&
5313 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5314 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5315 hci_req_disable_address_resolution(hdev);
5316}
5317
5318static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5319{
5320 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5321 struct hci_conn *conn;
5322
5323 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5324
5325 if (ev->status) {
5326 struct adv_info *adv;
5327
5328 adv = hci_find_adv_instance(hdev, ev->handle);
5329 if (!adv)
5330 return;
5331
5332 /* Remove advertising as it has been terminated */
5333 hci_remove_adv_instance(hdev, ev->handle);
5334 mgmt_advertising_removed(NULL, hdev, ev->handle);
5335
5336 return;
5337 }
5338
5339 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5340 if (conn) {
5341 struct adv_info *adv_instance;
5342
5343 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
5344 return;
5345
5346 if (!ev->handle) {
5347 bacpy(&conn->resp_addr, &hdev->random_addr);
5348 return;
5349 }
5350
5351 adv_instance = hci_find_adv_instance(hdev, ev->handle);
5352 if (adv_instance)
5353 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5354 }
5355}
5356
5357static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5358 struct sk_buff *skb)
5359{
5360 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5361 struct hci_conn *conn;
5362
5363 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5364
5365 if (ev->status)
5366 return;
5367
5368 hci_dev_lock(hdev);
5369
5370 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5371 if (conn) {
5372 conn->le_conn_interval = le16_to_cpu(ev->interval);
5373 conn->le_conn_latency = le16_to_cpu(ev->latency);
5374 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5375 }
5376
5377 hci_dev_unlock(hdev);
5378}
5379
5380 /* This function requires the caller holds hdev->lock */
5381static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5382 bdaddr_t *addr,
5383 u8 addr_type, u8 adv_type,
5384 bdaddr_t *direct_rpa)
5385{
5386 struct hci_conn *conn;
5387 struct hci_conn_params *params;
5388
5389 /* If the event is not connectable don't proceed further */
5390 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5391 return NULL;
5392
5393 /* Ignore if the device is blocked */
5394 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5395 return NULL;
5396
5397 /* Most controllers will refuse to create a new connection
5398  * while there already is one in peripheral role.
5399  */
5400 if (hdev->conn_hash.le_num_peripheral > 0 &&
5401 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5402 !(hdev->le_states[3] & 0x10)))
5403 return NULL;
5404
5405 /* Only connect to devices for which a pending LE connection
5406  * action has been queued in pend_le_conns.
5407  */
5408 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5409 addr_type);
5410 if (!params)
5411 return NULL;
5412
5413 if (!params->explicit_connect) {
5414 switch (params->auto_connect) {
5415 case HCI_AUTO_CONN_DIRECT:
5416 /* Only devices advertising with ADV_DIRECT_IND are
5417  * triggering a connection attempt. This is allowing
5418  * incoming connections from peripheral devices.
5419  */
5420 if (adv_type != LE_ADV_DIRECT_IND)
5421 return NULL;
5422 break;
5423 case HCI_AUTO_CONN_ALWAYS:
5424 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5425  * are triggering a connection attempt. This means
5426  * that incoming connections from peripheral devices are
5427  * accepted and also outgoing connections to peripheral
5428  * devices are established when found.
5429  */
5430 break;
5431 default:
5432 return NULL;
5433 }
5434 }
5435
5436 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5437 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5438 direct_rpa);
5439 if (!IS_ERR(conn)) {
5440 /* If HCI_AUTO_CONN_EXPLICIT is requested, conn is already
5441  * owned by the higher layer that asked for the connection.
5442  *
5443  * Otherwise store the pointer since the params entry that
5444  * triggered the attempt is the only other owner of the
5445  * object. This way the connection can be aborted if the
5446  * parameters get removed, and the reference count stays
5447  * consistent once the connection is established.
5448  */
5449 if (!params->explicit_connect)
5450 params->conn = hci_conn_get(conn);
5451
5452 return conn;
5453 }
5454
5455 switch (PTR_ERR(conn)) {
5456 case -EBUSY:
5457 /* If hci_connect() returns -EBUSY it means there is already
5458  * an LE connection attempt going on. Since controllers don't
5459  * support more than one connection attempt at the time, we
5460  * don't consider this an error case.
5461  */
5462 break;
5463 default:
5464 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5465 return NULL;
5466 }
5467
5468 return NULL;
5469}
5470
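/* Process a single advertising or scan response report: validate it,
 * resolve RPAs to identity addresses where possible, trigger pending
 * auto-connections and generate mgmt Device Found events, merging a
 * scan response with the previously cached advertising report during
 * active scanning.
 */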
5471static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5472 u8 bdaddr_type, bdaddr_t *direct_addr,
5473 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5474 bool ext_adv)
5475{
5476 struct discovery_state *d = &hdev->discovery;
5477 struct smp_irk *irk;
5478 struct hci_conn *conn;
5479 bool match;
5480 u32 flags;
5481 u8 *ptr;
5482
5483 switch (type) {
5484 case LE_ADV_IND:
5485 case LE_ADV_DIRECT_IND:
5486 case LE_ADV_SCAN_IND:
5487 case LE_ADV_NONCONN_IND:
5488 case LE_ADV_SCAN_RSP:
5489 break;
5490 default:
5491 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5492 "type: 0x%02x", type);
5493 return;
5494 }
5495
5496 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5497 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5498 return;
5499 }
5500
5501 /* Find the end of the data in case the report contains padded zero
5502  * bytes at the end causing an invalid length value.
5503  *
5504  * When data is NULL, len is 0 so there is no need for extra ptr
5505  * check as 'ptr < data + 0' is already false in such case.
5506  */
5507 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5508 if (ptr + 1 + *ptr > data + len)
5509 break;
5510 }
5511
5512 /* Adjust for the actual length in case the report was padded
5513  * or ended with a malformed field.
5514  */
5515 len = ptr - data;
5516
5517 /* If the direct address is present, then this report is from
5518  * a LE Direct Advertising Report event. In that case it is
5519  * important to see if the address is matching the local
5520  * controller address.
5521  */
5522 if (direct_addr) {
5523 /* Only resolvable random addresses are valid for these
5524  * kind of reports and others can be ignored.
5525  */
5526 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5527 return;
5528
5529 /* If the controller is not using resolvable random
5530  * addresses, then this report can be ignored.
5531  */
5532 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5533 return;
5534
5535 /* If the local IRK of the controller does not match
5536  * with the resolvable random address provided, then
5537  * this report can be ignored.
5538  */
5539 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5540 return;
5541 }
5542
5543 /* Check if we need to convert to identity address */
5544 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5545 if (irk) {
5546 bdaddr = &irk->bdaddr;
5547 bdaddr_type = irk->addr_type;
5548 }
5549
5550 /* Check if we have been requested to connect to this device.
5551  *
5552  * direct_addr is set only for directed advertising reports (it
5553  * is NULL otherwise) and was already verified to be RPA above.
5554  */
5555 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5556 direct_addr);
5557 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5558 /* Store report for later inclusion by
5559  * mgmt_device_connected
5560  */
5561 memcpy(conn->le_adv_data, data, len);
5562 conn->le_adv_data_len = len;
5563 }
5564
5565 /* Passive scanning shouldn't trigger any device found events,
5566  * except for devices marked as CONN_REPORT for which we do
5567  * send device found events, or advertisement monitoring requested.
5568  */
5569 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5570 if (type == LE_ADV_DIRECT_IND)
5571 return;
5572
5573 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5574 bdaddr, bdaddr_type) &&
5575 idr_is_empty(&hdev->adv_monitors_idr))
5576 return;
5577
5578 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5579 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5580 else
5581 flags = 0;
5582 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5583 rssi, flags, data, len, NULL, 0);
5584 return;
5585 }
5586
5587 /* When receiving non-connectable or scannable undirected
5588  * advertising reports, this means that the remote device is
5589  * not connectable and then clearly indicate this in the
5590  * device found event.
5591  *
5592  * When receiving a scan response, then there is no way to
5593  * know if the remote device is connectable or not. However
5594  * since scan responses are merged with a previously seen
5595  * advertising report, the flags field from that report
5596  * will be used.
5597  *
5598  * In the unlikely case that a controller just sends a scan
5599  * response without a matching advertising report, mark it
5600  * as not connectable as well.
5601  */
5602 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5603 type == LE_ADV_SCAN_RSP)
5604 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5605 else
5606 flags = 0;
5607
5608 /* If there's nothing pending either store the data from this
5609  * event or send an immediate device found event if the data
5610  * should not be stored for later.
5611  */
5612 if (!ext_adv && !has_pending_adv_report(hdev)) {
5613 /* If the report will trigger a SCAN_REQ store it for
5614  * later merging with its scan response.
5615  */
5616 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5617 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5618 rssi, flags, data, len);
5619 return;
5620 }
5621
5622 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5623 rssi, flags, data, len, NULL, 0);
5624 return;
5625 }
5626
5627 /* Check if the pending report is for the same device as the new one */
5628 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5629 bdaddr_type == d->last_adv_addr_type);
5630
5631 /* If the pending data doesn't match this report or this isn't a
5632  * scan response (e.g. we got a duplicate ADV_IND) then force
5633  * sending of the pending data.
5634  */
5635 if (type != LE_ADV_SCAN_RSP || !match) {
5636 /* Send out whatever is in the cache, but skip duplicates */
5637 if (!match)
5638 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5639 d->last_adv_addr_type, NULL,
5640 d->last_adv_rssi, d->last_adv_flags,
5641 d->last_adv_data,
5642 d->last_adv_data_len, NULL, 0);
5643
5644 /* If the new report will trigger a SCAN_REQ store it for
5645  * later merging.
5646  */
5647 if (!ext_adv && (type == LE_ADV_IND ||
5648 type == LE_ADV_SCAN_IND)) {
5649 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5650 rssi, flags, data, len);
5651 return;
5652 }
5653
5654 /* The advertising reports cannot be merged, so clear
5655  * the pending report and send out a device found event.
5656  */
5657 clear_pending_adv_report(hdev);
5658 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5659 rssi, flags, data, len, NULL, 0);
5660 return;
5661 }
5662
5663 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5664  * the new event is a SCAN_RSP. We can therefore proceed with
5665  * sending a merged device found event.
5666  */
5667 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5668 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5669 d->last_adv_data, d->last_adv_data_len, data, len);
5670 clear_pending_adv_report(hdev);
5671}
5672
5673static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5674{
5675 u8 num_reports = skb->data[0];
5676 void *ptr = &skb->data[1];
5677
5678 hci_dev_lock(hdev);
5679
5680 while (num_reports--) {
5681 struct hci_ev_le_advertising_info *ev = ptr;
5682 s8 rssi;
5683
5684 if (ev->length <= HCI_MAX_AD_LENGTH) {
5685 rssi = ev->data[ev->length];
5686 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5687 ev->bdaddr_type, NULL, 0, rssi,
5688 ev->data, ev->length, false);
5689 } else {
5690 bt_dev_err(hdev, "Dropping invalid advertising data");
5691 }
5692
5693 ptr += sizeof(*ev) + ev->length + 1;
5694 }
5695
5696 hci_dev_unlock(hdev);
5697}
5698
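/* Map an extended advertising report event type onto the legacy
 * advertising PDU types used by process_adv_report(). Returns
 * LE_ADV_INVALID for combinations that cannot be represented.
 */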
5699static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5700{
5701 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5702 switch (evt_type) {
5703 case LE_LEGACY_ADV_IND:
5704 return LE_ADV_IND;
5705 case LE_LEGACY_ADV_DIRECT_IND:
5706 return LE_ADV_DIRECT_IND;
5707 case LE_LEGACY_ADV_SCAN_IND:
5708 return LE_ADV_SCAN_IND;
5709 case LE_LEGACY_NONCONN_IND:
5710 return LE_ADV_NONCONN_IND;
5711 case LE_LEGACY_SCAN_RSP_ADV:
5712 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5713 return LE_ADV_SCAN_RSP;
5714 }
5715
5716 goto invalid;
5717 }
5718
5719 if (evt_type & LE_EXT_ADV_CONN_IND) {
5720 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5721 return LE_ADV_DIRECT_IND;
5722
5723 return LE_ADV_IND;
5724 }
5725
5726 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5727 return LE_ADV_SCAN_RSP;
5728
5729 if (evt_type & LE_EXT_ADV_SCAN_IND)
5730 return LE_ADV_SCAN_IND;
5731
5732 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5733 evt_type & LE_EXT_ADV_DIRECT_IND)
5734 return LE_ADV_NONCONN_IND;
5735
5736invalid:
5737 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5738 evt_type);
5739
5740 return LE_ADV_INVALID;
5741}
5742
5743static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5744{
5745 u8 num_reports = skb->data[0];
5746 void *ptr = &skb->data[1];
5747
5748 hci_dev_lock(hdev);
5749
5750 while (num_reports--) {
5751 struct hci_ev_le_ext_adv_report *ev = ptr;
5752 u8 legacy_evt_type;
5753 u16 evt_type;
5754
5755 evt_type = __le16_to_cpu(ev->evt_type);
5756 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5757 if (legacy_evt_type != LE_ADV_INVALID) {
5758 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5759 ev->bdaddr_type, NULL, 0, ev->rssi,
5760 ev->data, ev->length,
5761 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5762 }
5763
5764 ptr += sizeof(*ev) + ev->length;
5765 }
5766
5767 hci_dev_unlock(hdev);
5768}
5769
5770static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5771 struct sk_buff *skb)
5772{
5773 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5774 struct hci_conn *conn;
5775
5776 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5777
5778 hci_dev_lock(hdev);
5779
5780 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5781 if (conn) {
5782 if (!ev->status)
5783 memcpy(conn->features[0], ev->features, 8);
5784
5785 if (conn->state == BT_CONFIG) {
5786 __u8 status;
5787
5788 /* If the local controller supports peripheral-initiated
5789  * features exchange, but the remote controller does not,
5790  * then it is possible that the error code 0x1a for
5791  * unsupported remote feature gets returned.
5792  *
5793  * In this specific case, allow the connection to
5794  * transition into connected state and mark it as
5795  * successful.
5796  */
5797 if (!conn->out && ev->status == 0x1a &&
5798 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
5799 status = 0x00;
5800 else
5801 status = ev->status;
5802
5803 conn->state = BT_CONNECTED;
5804 hci_connect_cfm(conn, status);
5805 hci_conn_drop(conn);
5806 }
5807 }
5808
5809 hci_dev_unlock(hdev);
5810}
5811
5812static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5813{
5814 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5815 struct hci_cp_le_ltk_reply cp;
5816 struct hci_cp_le_ltk_neg_reply neg;
5817 struct hci_conn *conn;
5818 struct smp_ltk *ltk;
5819
5820 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5821
5822 hci_dev_lock(hdev);
5823
5824 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5825 if (conn == NULL)
5826 goto not_found;
5827
5828 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5829 if (!ltk)
5830 goto not_found;
5831
5832 if (smp_ltk_is_sc(ltk)) {
5833 /* With SC both EDiv and Rand are set to zero */
5834 if (ev->ediv || ev->rand)
5835 goto not_found;
5836 } else {
5837 /* For non-SC keys check that EDiv and Rand match */
5838 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5839 goto not_found;
5840 }
5841
5842 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5843 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5844 cp.handle = cpu_to_le16(conn->handle);
5845
5846 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5847
5848 conn->enc_key_size = ltk->enc_size;
5849
5850 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5851
5852 /* An STK is a temporary key that is only used to encrypt the
5853  * connection right after pairing while the actual keys are
5854  * being distributed. It must not be reused for later
5855  * encryption attempts, so remove it from the key list once
5856  * it has served its purpose.
5857  */
5858 if (ltk->type == SMP_STK) {
5859 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5860 list_del_rcu(&ltk->list);
5861 kfree_rcu(ltk, rcu);
5862 } else {
5863 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5864 }
5865
5866 hci_dev_unlock(hdev);
5867
5868 return;
5869
5870not_found:
5871 neg.handle = ev->handle;
5872 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5873 hci_dev_unlock(hdev);
5874}
5875
5876static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5877 u8 reason)
5878{
5879 struct hci_cp_le_conn_param_req_neg_reply cp;
5880
5881 cp.handle = cpu_to_le16(handle);
5882 cp.reason = reason;
5883
5884 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5885 &cp);
5886}
5887
5888static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5889 struct sk_buff *skb)
5890{
5891 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5892 struct hci_cp_le_conn_param_req_reply cp;
5893 struct hci_conn *hcon;
5894 u16 handle, min, max, latency, timeout;
5895
5896 handle = le16_to_cpu(ev->handle);
5897 min = le16_to_cpu(ev->interval_min);
5898 max = le16_to_cpu(ev->interval_max);
5899 latency = le16_to_cpu(ev->latency);
5900 timeout = le16_to_cpu(ev->timeout);
5901
5902 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5903 if (!hcon || hcon->state != BT_CONNECTED)
5904 return send_conn_param_neg_reply(hdev, handle,
5905 HCI_ERROR_UNKNOWN_CONN_ID);
5906
5907 if (hci_check_conn_params(min, max, latency, timeout))
5908 return send_conn_param_neg_reply(hdev, handle,
5909 HCI_ERROR_INVALID_LL_PARAMS);
5910
5911 if (hcon->role == HCI_ROLE_MASTER) {
5912 struct hci_conn_params *params;
5913 u8 store_hint;
5914
5915 hci_dev_lock(hdev);
5916
5917 params = hci_conn_params_lookup(hdev, &hcon->dst,
5918 hcon->dst_type);
5919 if (params) {
5920 params->conn_min_interval = min;
5921 params->conn_max_interval = max;
5922 params->conn_latency = latency;
5923 params->supervision_timeout = timeout;
5924 store_hint = 0x01;
5925 } else {
5926 store_hint = 0x00;
5927 }
5928
5929 hci_dev_unlock(hdev);
5930
5931 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5932 store_hint, min, max, latency, timeout);
5933 }
5934
5935 cp.handle = ev->handle;
5936 cp.interval_min = ev->interval_min;
5937 cp.interval_max = ev->interval_max;
5938 cp.latency = ev->latency;
5939 cp.timeout = ev->timeout;
5940 cp.min_ce_len = 0;
5941 cp.max_ce_len = 0;
5942
5943 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5944}
5945
5946static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5947 struct sk_buff *skb)
5948{
5949 u8 num_reports = skb->data[0];
5950 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
5951
5952 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5953 return;
5954
5955 hci_dev_lock(hdev);
5956
5957 for (; num_reports; num_reports--, ev++)
5958 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5959 ev->bdaddr_type, &ev->direct_addr,
5960 ev->direct_addr_type, ev->rssi, NULL, 0,
5961 false);
5962
5963 hci_dev_unlock(hdev);
5964}
5965
5966static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5967{
5968 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5969 struct hci_conn *conn;
5970
5971 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5972
5973 if (ev->status)
5974 return;
5975
5976 hci_dev_lock(hdev);
5977
5978 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5979 if (!conn)
5980 goto unlock;
5981
5982 conn->le_tx_phy = ev->tx_phy;
5983 conn->le_rx_phy = ev->rx_phy;
5984
5985unlock:
5986 hci_dev_unlock(hdev);
5987}
5988
5989static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5990{
5991 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5992
5993 skb_pull(skb, sizeof(*le_ev));
5994
5995 switch (le_ev->subevent) {
5996 case HCI_EV_LE_CONN_COMPLETE:
5997 hci_le_conn_complete_evt(hdev, skb);
5998 break;
5999
6000 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6001 hci_le_conn_update_complete_evt(hdev, skb);
6002 break;
6003
6004 case HCI_EV_LE_ADVERTISING_REPORT:
6005 hci_le_adv_report_evt(hdev, skb);
6006 break;
6007
6008 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6009 hci_le_remote_feat_complete_evt(hdev, skb);
6010 break;
6011
6012 case HCI_EV_LE_LTK_REQ:
6013 hci_le_ltk_request_evt(hdev, skb);
6014 break;
6015
6016 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6017 hci_le_remote_conn_param_req_evt(hdev, skb);
6018 break;
6019
6020 case HCI_EV_LE_DIRECT_ADV_REPORT:
6021 hci_le_direct_adv_report_evt(hdev, skb);
6022 break;
6023
6024 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6025 hci_le_phy_update_evt(hdev, skb);
6026 break;
6027
6028 case HCI_EV_LE_EXT_ADV_REPORT:
6029 hci_le_ext_adv_report_evt(hdev, skb);
6030 break;
6031
6032 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6033 hci_le_enh_conn_complete_evt(hdev, skb);
6034 break;
6035
6036 case HCI_EV_LE_EXT_ADV_SET_TERM:
6037 hci_le_ext_adv_term_evt(hdev, skb);
6038 break;
6039
6040 default:
6041 break;
6042 }
6043}
6044
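/* Check whether the given skb carries the event that completes the
 * request for the given opcode (either the expected event or a
 * matching Command Complete), advancing past the headers on success.
 */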
6045static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6046 u8 event, struct sk_buff *skb)
6047{
6048 struct hci_ev_cmd_complete *ev;
6049 struct hci_event_hdr *hdr;
6050
6051 if (!skb)
6052 return false;
6053
6054 if (skb->len < sizeof(*hdr)) {
6055 bt_dev_err(hdev, "too short HCI event");
6056 return false;
6057 }
6058
6059 hdr = (void *) skb->data;
6060 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6061
6062 if (event) {
6063 if (hdr->evt != event)
6064 return false;
6065 return true;
6066 }
6067
6068 /* Check if the request ended in Command Status - no way to
6069  * retrieve any extra parameters in this case.
6070  */
6071 if (hdr->evt == HCI_EV_CMD_STATUS)
6072 return false;
6073
6074 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6075 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6076 hdr->evt);
6077 return false;
6078 }
6079
6080 if (skb->len < sizeof(*ev)) {
6081 bt_dev_err(hdev, "too short cmd_complete event");
6082 return false;
6083 }
6084
6085 ev = (void *) skb->data;
6086 skb_pull(skb, sizeof(*ev));
6087
6088 if (opcode != __le16_to_cpu(ev->opcode)) {
6089 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6090 __le16_to_cpu(ev->opcode));
6091 return false;
6092 }
6093
6094 return true;
6095}
6096
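/* If the controller was woken up while suspended, record which event
 * caused the wakeup and the address of the remote device responsible,
 * so the wake reason can later be reported through mgmt.
 */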
6097static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6098 struct sk_buff *skb)
6099{
6100 struct hci_ev_le_advertising_info *adv;
6101 struct hci_ev_le_direct_adv_info *direct_adv;
6102 struct hci_ev_le_ext_adv_report *ext_adv;
6103 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6104 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6105
6106 hci_dev_lock(hdev);
6107
6108 /* If we are not suspended, or a wake reason has already been
6109  * recorded, there is nothing to do.
6110  */
6111 if (!hdev->suspended || hdev->wake_reason)
6112 goto unlock;
6113
6114 /* Default to remote wake; this gets overridden below if the
6115  * event turns out not to be connection or advertising related.
6116  */
6117 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6118
6119 /* Once configured for remote wakeup, we should only wake up
6120  * for reconnections or advertisements. Record the address of
6121  * the remote device that generated the wakeup event.
6122  */
6123 if (event == HCI_EV_CONN_REQUEST) {
6124 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6125 hdev->wake_addr_type = BDADDR_BREDR;
6126 } else if (event == HCI_EV_CONN_COMPLETE) {
6127 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6128 hdev->wake_addr_type = BDADDR_BREDR;
6129 } else if (event == HCI_EV_LE_META) {
6130 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6131 u8 subevent = le_ev->subevent;
6132 u8 *ptr = &skb->data[sizeof(*le_ev)];
6133 u8 num_reports = *ptr;
6134
6135 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6136 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6137 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6138 num_reports) {
6139 adv = (void *)(ptr + 1);
6140 direct_adv = (void *)(ptr + 1);
6141 ext_adv = (void *)(ptr + 1);
6142
6143 switch (subevent) {
6144 case HCI_EV_LE_ADVERTISING_REPORT:
6145 bacpy(&hdev->wake_addr, &adv->bdaddr);
6146 hdev->wake_addr_type = adv->bdaddr_type;
6147 break;
6148 case HCI_EV_LE_DIRECT_ADV_REPORT:
6149 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6150 hdev->wake_addr_type = direct_adv->bdaddr_type;
6151 break;
6152 case HCI_EV_LE_EXT_ADV_REPORT:
6153 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6154 hdev->wake_addr_type = ext_adv->bdaddr_type;
6155 break;
6156 }
6157 }
6158 } else {
6159 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6160 }
6161
6162unlock:
6163 hci_dev_unlock(hdev);
6164}
6165
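/* Main entry point for a received HCI event packet: dispatch the event
 * to the matching handler above and complete any pending command
 * request that was waiting for it.
 */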
6166void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6167{
6168 struct hci_event_hdr *hdr = (void *) skb->data;
6169 hci_req_complete_t req_complete = NULL;
6170 hci_req_complete_skb_t req_complete_skb = NULL;
6171 struct sk_buff *orig_skb = NULL;
6172 u8 status = 0, event = hdr->evt, req_evt = 0;
6173 u16 opcode = HCI_OP_NOP;
6174
6175 if (!event) {
6176 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6177 goto done;
6178 }
6179
6180 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6181 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6182 opcode = __le16_to_cpu(cmd_hdr->opcode);
6183 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6184 &req_complete_skb);
6185 req_evt = event;
6186 }
6187
6188 /* If it looks like we might end up having to call
6189  * req_complete_skb, store a pristine copy of the skb
6190  * since the handlers below pull the event header and
6191  * parameters off of it.
6192  */
6193 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6194 event == HCI_EV_CMD_COMPLETE)
6195 orig_skb = skb_clone(skb, GFP_KERNEL);
6196
6197 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6198
6199 /* Store wake reason if we're suspended */
6200 hci_store_wake_reason(hdev, event, skb);
6201
6202 switch (event) {
6203 case HCI_EV_INQUIRY_COMPLETE:
6204 hci_inquiry_complete_evt(hdev, skb);
6205 break;
6206
6207 case HCI_EV_INQUIRY_RESULT:
6208 hci_inquiry_result_evt(hdev, skb);
6209 break;
6210
6211 case HCI_EV_CONN_COMPLETE:
6212 hci_conn_complete_evt(hdev, skb);
6213 break;
6214
6215 case HCI_EV_CONN_REQUEST:
6216 hci_conn_request_evt(hdev, skb);
6217 break;
6218
6219 case HCI_EV_DISCONN_COMPLETE:
6220 hci_disconn_complete_evt(hdev, skb);
6221 break;
6222
6223 case HCI_EV_AUTH_COMPLETE:
6224 hci_auth_complete_evt(hdev, skb);
6225 break;
6226
6227 case HCI_EV_REMOTE_NAME:
6228 hci_remote_name_evt(hdev, skb);
6229 break;
6230
6231 case HCI_EV_ENCRYPT_CHANGE:
6232 hci_encrypt_change_evt(hdev, skb);
6233 break;
6234
6235 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6236 hci_change_link_key_complete_evt(hdev, skb);
6237 break;
6238
6239 case HCI_EV_REMOTE_FEATURES:
6240 hci_remote_features_evt(hdev, skb);
6241 break;
6242
6243 case HCI_EV_CMD_COMPLETE:
6244 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6245 &req_complete, &req_complete_skb);
6246 break;
6247
6248 case HCI_EV_CMD_STATUS:
6249 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6250 &req_complete_skb);
6251 break;
6252
6253 case HCI_EV_HARDWARE_ERROR:
6254 hci_hardware_error_evt(hdev, skb);
6255 break;
6256
6257 case HCI_EV_ROLE_CHANGE:
6258 hci_role_change_evt(hdev, skb);
6259 break;
6260
6261 case HCI_EV_NUM_COMP_PKTS:
6262 hci_num_comp_pkts_evt(hdev, skb);
6263 break;
6264
6265 case HCI_EV_MODE_CHANGE:
6266 hci_mode_change_evt(hdev, skb);
6267 break;
6268
6269 case HCI_EV_PIN_CODE_REQ:
6270 hci_pin_code_request_evt(hdev, skb);
6271 break;
6272
6273 case HCI_EV_LINK_KEY_REQ:
6274 hci_link_key_request_evt(hdev, skb);
6275 break;
6276
6277 case HCI_EV_LINK_KEY_NOTIFY:
6278 hci_link_key_notify_evt(hdev, skb);
6279 break;
6280
6281 case HCI_EV_CLOCK_OFFSET:
6282 hci_clock_offset_evt(hdev, skb);
6283 break;
6284
6285 case HCI_EV_PKT_TYPE_CHANGE:
6286 hci_pkt_type_change_evt(hdev, skb);
6287 break;
6288
6289 case HCI_EV_PSCAN_REP_MODE:
6290 hci_pscan_rep_mode_evt(hdev, skb);
6291 break;
6292
6293 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6294 hci_inquiry_result_with_rssi_evt(hdev, skb);
6295 break;
6296
6297 case HCI_EV_REMOTE_EXT_FEATURES:
6298 hci_remote_ext_features_evt(hdev, skb);
6299 break;
6300
6301 case HCI_EV_SYNC_CONN_COMPLETE:
6302 hci_sync_conn_complete_evt(hdev, skb);
6303 break;
6304
6305 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6306 hci_extended_inquiry_result_evt(hdev, skb);
6307 break;
6308
6309 case HCI_EV_KEY_REFRESH_COMPLETE:
6310 hci_key_refresh_complete_evt(hdev, skb);
6311 break;
6312
6313 case HCI_EV_IO_CAPA_REQUEST:
6314 hci_io_capa_request_evt(hdev, skb);
6315 break;
6316
6317 case HCI_EV_IO_CAPA_REPLY:
6318 hci_io_capa_reply_evt(hdev, skb);
6319 break;
6320
6321 case HCI_EV_USER_CONFIRM_REQUEST:
6322 hci_user_confirm_request_evt(hdev, skb);
6323 break;
6324
6325 case HCI_EV_USER_PASSKEY_REQUEST:
6326 hci_user_passkey_request_evt(hdev, skb);
6327 break;
6328
6329 case HCI_EV_USER_PASSKEY_NOTIFY:
6330 hci_user_passkey_notify_evt(hdev, skb);
6331 break;
6332
6333 case HCI_EV_KEYPRESS_NOTIFY:
6334 hci_keypress_notify_evt(hdev, skb);
6335 break;
6336
6337 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6338 hci_simple_pair_complete_evt(hdev, skb);
6339 break;
6340
6341 case HCI_EV_REMOTE_HOST_FEATURES:
6342 hci_remote_host_features_evt(hdev, skb);
6343 break;
6344
6345 case HCI_EV_LE_META:
6346 hci_le_meta_evt(hdev, skb);
6347 break;
6348
6349 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6350 hci_remote_oob_data_request_evt(hdev, skb);
6351 break;
6352
6353#if IS_ENABLED(CONFIG_BT_HS)
6354 case HCI_EV_CHANNEL_SELECTED:
6355 hci_chan_selected_evt(hdev, skb);
6356 break;
6357
6358 case HCI_EV_PHY_LINK_COMPLETE:
6359 hci_phy_link_complete_evt(hdev, skb);
6360 break;
6361
6362 case HCI_EV_LOGICAL_LINK_COMPLETE:
6363 hci_loglink_complete_evt(hdev, skb);
6364 break;
6365
6366 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6367 hci_disconn_loglink_complete_evt(hdev, skb);
6368 break;
6369
6370 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6371 hci_disconn_phylink_complete_evt(hdev, skb);
6372 break;
6373#endif
6374
6375 case HCI_EV_NUM_COMP_BLOCKS:
6376 hci_num_comp_blocks_evt(hdev, skb);
6377 break;
6378
6379 case HCI_EV_VENDOR:
6380 msft_vendor_evt(hdev, skb);
6381 break;
6382
6383 default:
6384 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6385 break;
6386 }
6387
6388 if (req_complete) {
6389 req_complete(hdev, status, opcode);
6390 } else if (req_complete_skb) {
6391 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6392 kfree_skb(orig_skb);
6393 orig_skb = NULL;
6394 }
6395 req_complete_skb(hdev, status, opcode, orig_skb);
6396 }
6397
6398done:
6399 kfree_skb(orig_skb);
6400 kfree_skb(skb);
6401 hdev->stat.evt_rx++;
6402}
6403