TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

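/* The helpers below each queue a single HCI command on a struct
 * hci_request. They are executed synchronously through hci_req_sync()
 * from the ioctl handlers further down, with the ioctl argument passed
 * through the opaque 'opt' parameter.
 */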
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

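/* Drive the discovery state machine. Userspace is only notified via
 * mgmt_discovering() on the edges it cares about: entering
 * DISCOVERY_FINDING and dropping back to DISCOVERY_STOPPED.
 */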
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

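/* Keep the resolve list ordered by signal strength: entries with the
 * smallest |RSSI| (the strongest responders) come first, so their names
 * get resolved before weaker ones. Entries whose name resolution is
 * already pending keep their position at the front of the list.
 */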
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

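/* Copy up to 'num' cached inquiry results into 'buf' as an array of
 * struct inquiry_info. Called with hdev->lock held; returns how many
 * entries were actually copied.
 */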
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

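/* HCIINQUIRY ioctl: flush the inquiry cache if it is stale (or if the
 * caller asked for a flush), run a new inquiry synchronously, then copy
 * the cached results back to userspace.
 *
 * Rough userspace usage on a raw HCI socket (a sketch only, error
 * handling omitted; GIAC LAP 0x9e8b33, up to 255 responses):
 *
 *     struct {
 *             struct hci_inquiry_req ir;
 *             struct inquiry_info ii[255];
 *     } buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *                       .flags = IREQ_CACHE_FLUSH,
 *                       .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *     ioctl(hci_socket, HCIINQUIRY, &buf);
 */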
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result into a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

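/* Mirror a scan-enable value written through the legacy HCISETSCAN
 * ioctl into the HCI_CONNECTABLE/HCI_DISCOVERABLE device flags, and
 * let the management interface know the settings changed behind its
 * back.
 */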
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

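/* Handler for the legacy device ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU
 * and HCISETSCOMTU). Most of them translate directly into a single
 * synchronous HCI request.
 */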
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

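/* rfkill callback: when the switch blocks the radio, close the device
 * unless it is still in setup or config; the HCI_RFKILLED flag is
 * checked again in hci_power_on() so such a device is not brought up
 * later.
 */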
static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

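/* Power-on work: bring the device up, then either turn it back off if
 * an error condition ignored during setup (rfkill, unconfigured, no
 * valid address) still applies, schedule the auto-off timer, or emit
 * the appropriate mgmt Index Added event once setup/config completes.
 */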
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send an Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (hci_dev_do_close(hdev))
                return;

        hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key, *tmp;

        /* Entries are freed while walking the list, so use the _safe
         * iterator (the plain variant would read freed memory).
         */
        list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b, *tmp;

        list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

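/* Decide whether a newly created BR/EDR link key should be stored
 * persistently. The decision is based on the key type and on whether
 * either side required bonding during the authentication exchange.
 */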
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

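/* Resolve a Resolvable Private Address to its IRK. A cached ->rpa
 * match is tried first; otherwise smp_irk_matches() is run against
 * every stored IRK and a hit is cached in ->rpa for next time.
 */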
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k, *tmp;
        int removed = 0;

        /* Entries are freed while walking the list, so use the _safe
         * iterator rather than the plain RCU one.
         */
        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

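/* Check whether we share a bond with the given address: a stored link
 * key for BR/EDR, or a matching LTK (after resolving the address
 * through a stored IRK) for LE.
 */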
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

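/* Store remote OOB pairing data. The 'present' field encodes which
 * hash/randomizer pairs are valid: 0x01 for P-192 only, 0x02 for P-256
 * only and 0x03 for both.
 */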
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                else
                        data->present = 0x00;
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
                if (adv_instance->instance == instance)
                        return adv_instance;
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);
        if (!cur_instance)
                return NULL;

        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                        struct adv_info, list);
        else
                return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

1634         BT_DBG("%s removing instance %d", hdev->name, instance);
1635 
1636         if (hdev->cur_adv_instance == instance) {
1637                 if (hdev->adv_instance_timeout) {
1638                         cancel_delayed_work(&hdev->adv_instance_expire);
1639                         hdev->adv_instance_timeout = 0;
1640                 }
1641                 hdev->cur_adv_instance = 0x00;
1642         }
1643 
1644         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1645 
1646         list_del(&adv_instance->list);
1647         kfree(adv_instance);
1648 
1649         hdev->adv_instance_cnt--;
1650 
1651         return 0;
1652 }
1653 
1654 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1655 {
1656         struct adv_info *adv_instance, *n;
1657 
1658         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1659                 adv_instance->rpa_expired = rpa_expired;
1660 }
1661 
1662 /* This function requires the caller holds hdev->lock */
1663 void hci_adv_instances_clear(struct hci_dev *hdev)
1664 {
1665         struct adv_info *adv_instance, *n;
1666 
1667         if (hdev->adv_instance_timeout) {
1668                 cancel_delayed_work(&hdev->adv_instance_expire);
1669                 hdev->adv_instance_timeout = 0;
1670         }
1671 
1672         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1673                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1674                 list_del(&adv_instance->list);
1675                 kfree(adv_instance);
1676         }
1677 
1678         hdev->adv_instance_cnt = 0;
1679         hdev->cur_adv_instance = 0x00;
1680 }
1681 
1682 static void adv_instance_rpa_expired(struct work_struct *work)
1683 {
1684         struct adv_info *adv_instance = container_of(work, struct adv_info,
1685                                                      rpa_expired_cb.work);
1686 
1687         BT_DBG("");
1688 
1689         adv_instance->rpa_expired = true;
1690 }
1691 
1692 /* This function requires the caller holds hdev->lock */
1693 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1694                          u16 adv_data_len, u8 *adv_data,
1695                          u16 scan_rsp_len, u8 *scan_rsp_data,
1696                          u16 timeout, u16 duration, s8 tx_power,
1697                          u32 min_interval, u32 max_interval)
1698 {
1699         struct adv_info *adv_instance;
1700 
1701         adv_instance = hci_find_adv_instance(hdev, instance);
1702         if (adv_instance) {
1703                 memset(adv_instance->adv_data, 0,
1704                        sizeof(adv_instance->adv_data));
1705                 memset(adv_instance->scan_rsp_data, 0,
1706                        sizeof(adv_instance->scan_rsp_data));
1707         } else {
1708                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1709                     instance < 1 || instance > hdev->le_num_of_adv_sets)
1710                         return -EOVERFLOW;
1711 
1712                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1713                 if (!adv_instance)
1714                         return -ENOMEM;
1715 
1716                 adv_instance->pending = true;
1717                 adv_instance->instance = instance;
1718                 list_add(&adv_instance->list, &hdev->adv_instances);
1719                 hdev->adv_instance_cnt++;
1720         }
1721 
1722         adv_instance->flags = flags;
1723         adv_instance->adv_data_len = adv_data_len;
1724         adv_instance->scan_rsp_len = scan_rsp_len;
1725         adv_instance->min_interval = min_interval;
1726         adv_instance->max_interval = max_interval;
1727         adv_instance->tx_power = tx_power;
1728 
1729         if (adv_data_len)
1730                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1731 
1732         if (scan_rsp_len)
1733                 memcpy(adv_instance->scan_rsp_data,
1734                        scan_rsp_data, scan_rsp_len);
1735 
1736         adv_instance->timeout = timeout;
1737         adv_instance->remaining_time = timeout;
1738 
1739         if (duration == 0)
1740                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1741         else
1742                 adv_instance->duration = duration;
1743 
1744         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1745                           adv_instance_rpa_expired);
1746 
1747         BT_DBG("%s for instance %d", hdev->name, instance);
1748 
1749         return 0;
1750 }
1751 
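/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * hypothetical caller registering advertising instance 1 with a single
 * AD structure (complete list of 16-bit service UUIDs, Battery Service).
 * All field values here are examples only.
 */
static int __maybe_unused example_add_adv(struct hci_dev *hdev)
{
        u8 ad[] = { 0x03, 0x03, 0x0f, 0x18 };   /* len, type, UUID 0x180f */
        int err;

        hci_dev_lock(hdev);
        /* No flags, no scan response, no timeout, duration 0 (use the
         * default rotation duration), no TX power preference, and the
         * controller's default advertising interval range.
         */
        err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL,
                                   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
                                   hdev->le_adv_min_interval,
                                   hdev->le_adv_max_interval);
        hci_dev_unlock(hdev);

        return err;
}
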
1752 /* This function requires the caller holds hdev->lock */
1753 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1754                               u16 adv_data_len, u8 *adv_data,
1755                               u16 scan_rsp_len, u8 *scan_rsp_data)
1756 {
1757         struct adv_info *adv_instance;
1758 
1759         adv_instance = hci_find_adv_instance(hdev, instance);
1760 
1761         /* If advertisement doesn't exist, we can't modify its data */
1762         if (!adv_instance)
1763                 return -ENOENT;
1764 
1765         if (adv_data_len) {
1766                 memset(adv_instance->adv_data, 0,
1767                        sizeof(adv_instance->adv_data));
1768                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1769                 adv_instance->adv_data_len = adv_data_len;
1770         }
1771 
1772         if (scan_rsp_len) {
1773                 memset(adv_instance->scan_rsp_data, 0,
1774                        sizeof(adv_instance->scan_rsp_data));
1775                 memcpy(adv_instance->scan_rsp_data,
1776                        scan_rsp_data, scan_rsp_len);
1777                 adv_instance->scan_rsp_len = scan_rsp_len;
1778         }
1779 
1780         return 0;
1781 }
1782 
1783 /* This function requires the caller holds hdev->lock */
1784 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1785 {
1786         u32 flags;
1787         struct adv_info *adv;
1788 
1789         if (instance == 0x00) {
1790                 /* Instance 0 always manages the "Tx Power" and "Flags"
1791                  * fields
1792                  */
1793                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1794 
1795                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1796                  * corresponds to the "connectable" instance flag.
1797                  */
1798                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1799                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1800 
1801                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1802                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1803                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1804                         flags |= MGMT_ADV_FLAG_DISCOV;
1805 
1806                 return flags;
1807         }
1808 
1809         adv = hci_find_adv_instance(hdev, instance);
1810 
1811         /* Return 0 when given an invalid instance identifier. */
1812         if (!adv)
1813                 return 0;
1814 
1815         return adv->flags;
1816 }
1817 
1818 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1819 {
1820         struct adv_info *adv;
1821 
1822         /* Instance 0x00 always sets the local name */
1823         if (instance == 0x00)
1824                 return true;
1825 
1826         adv = hci_find_adv_instance(hdev, instance);
1827         if (!adv)
1828                 return false;
1829 
1830         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1831             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1832                 return true;
1833 
1834         return adv->scan_rsp_len ? true : false;
1835 }
1836 
1837 /* This function requires the caller holds hdev->lock */
1838 void hci_adv_monitors_clear(struct hci_dev *hdev)
1839 {
1840         struct adv_monitor *monitor;
1841         int handle;
1842 
1843         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1844                 hci_free_adv_monitor(hdev, monitor);
1845 
1846         idr_destroy(&hdev->adv_monitors_idr);
1847 }
1848 
1849 /* Frees the monitor structure and does some bookkeeping.
1850  * This function requires the caller holds hdev->lock.
1851  */
1852 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1853 {
1854         struct adv_pattern *pattern;
1855         struct adv_pattern *tmp;
1856 
1857         if (!monitor)
1858                 return;
1859 
1860         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1861                 list_del(&pattern->list);
1862                 kfree(pattern);
1863         }
1864 
1865         if (monitor->handle)
1866                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1867 
1868         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1869                 hdev->adv_monitors_cnt--;
1870                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1871         }
1872 
1873         kfree(monitor);
1874 }
1875 
1876 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1877 {
1878         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1879 }
1880 
1881 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1882 {
1883         return mgmt_remove_adv_monitor_complete(hdev, status);
1884 }
1885 
1886 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1887  * also attempts to forward the request to the controller.
1888  * Returns true if request is forwarded (result is pending), false otherwise.
1889  * This function requires the caller holds hdev->lock.
1890  */
1891 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1892                          int *err)
1893 {
1894         int min, max, handle;
1895 
1896         *err = 0;
1897 
1898         if (!monitor) {
1899                 *err = -EINVAL;
1900                 return false;
1901         }
1902 
1903         min = HCI_MIN_ADV_MONITOR_HANDLE;
1904         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1905         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1906                            GFP_KERNEL);
1907         if (handle < 0) {
1908                 *err = handle;
1909                 return false;
1910         }
1911 
1912         monitor->handle = handle;
1913 
1914         if (!hdev_is_powered(hdev))
1915                 return false;
1916 
1917         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918         case HCI_ADV_MONITOR_EXT_NONE:
1919                 hci_update_passive_scan(hdev);
1920                 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
1921                 /* Message was not forwarded to controller - not an error */
1922                 return false;
1923         case HCI_ADV_MONITOR_EXT_MSFT:
1924                 *err = msft_add_monitor_pattern(hdev, monitor);
1925                 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
1926                            *err);
1927                 break;
1928         }
1929 
1930         return (*err == 0);
1931 }
1932 
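/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the (return value, *err) pair above encodes three outcomes: forwarded
 * to the controller (true, result pending), completed locally (false,
 * *err == 0), or failed (false, *err < 0). A hypothetical caller, with
 * hdev->lock held:
 */
static void __maybe_unused example_monitor_add(struct hci_dev *hdev,
                                               struct adv_monitor *monitor)
{
        int err;

        if (hci_add_adv_monitor(hdev, monitor, &err))
                return; /* offloaded; completion is reported asynchronously */

        if (err)
                bt_dev_err(hdev, "add monitor failed: %d", err);
        /* else: monitor handled by software-based filtering right away */
}
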
1933 /* Attempts to tell the controller to remove the monitor and frees it. If
1934  * the controller somehow doesn't have a corresponding handle, remove it anyway.
1935  * Returns true if request is forwarded (result is pending), false otherwise.
1936  * This function requires the caller holds hdev->lock.
1937  */
1938 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1939                                    struct adv_monitor *monitor,
1940                                    u16 handle, int *err)
1941 {
1942         *err = 0;
1943 
1944         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1946                 goto free_monitor;
1947         case HCI_ADV_MONITOR_EXT_MSFT:
1948                 *err = msft_remove_monitor(hdev, monitor, handle);
1949                 break;
1950         }
1951 
1952         /* If no matching handle is registered, just free the monitor */
1953         if (*err == -ENOENT)
1954                 goto free_monitor;
1955 
1956         return (*err == 0);
1957 
1958 free_monitor:
1959         if (*err == -ENOENT)
1960                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1961                             monitor->handle);
1962         hci_free_adv_monitor(hdev, monitor);
1963 
1964         *err = 0;
1965         return false;
1966 }
1967 
1968 /* Returns true if request is forwarded (result is pending), false otherwise.
1969  * This function requires the caller holds hdev->lock.
1970  */
1971 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1972 {
1973         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1974         bool pending;
1975 
1976         if (!monitor) {
1977                 *err = -EINVAL;
1978                 return false;
1979         }
1980 
1981         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1982         if (!*err && !pending)
1983                 hci_update_passive_scan(hdev);
1984 
1985         bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
1986                    hdev->name, handle, *err, pending ? "" : "not ");
1987 
1988         return pending;
1989 }
1990 
1991 /* Returns true if request is forwarded (result is pending), false otherwise.
1992  * This function requires the caller holds hdev->lock.
1993  */
1994 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
1995 {
1996         struct adv_monitor *monitor;
1997         int idr_next_id = 0;
1998         bool pending = false;
1999         bool update = false;
2000 
2001         *err = 0;
2002 
2003         while (!*err && !pending) {
2004                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2005                 if (!monitor)
2006                         break;
2007 
2008                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2009 
2010                 if (!*err && !pending)
2011                         update = true;
2012         }
2013 
2014         if (update)
2015                 hci_update_passive_scan(hdev);
2016 
2017         bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
2018                    hdev->name, *err, pending ? "" : "not ");
2019 
2020         return pending;
2021 }
2022 
2023 /* This function requires the caller holds hdev->lock */
2024 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2025 {
2026         return !idr_is_empty(&hdev->adv_monitors_idr);
2027 }
2028 
2029 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2030 {
2031         if (msft_monitor_supported(hdev))
2032                 return HCI_ADV_MONITOR_EXT_MSFT;
2033 
2034         return HCI_ADV_MONITOR_EXT_NONE;
2035 }
2036 
2037 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2038                                          bdaddr_t *bdaddr, u8 type)
2039 {
2040         struct bdaddr_list *b;
2041 
2042         list_for_each_entry(b, bdaddr_list, list) {
2043                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2044                         return b;
2045         }
2046 
2047         return NULL;
2048 }
2049 
2050 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2051                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2052                                 u8 type)
2053 {
2054         struct bdaddr_list_with_irk *b;
2055 
2056         list_for_each_entry(b, bdaddr_list, list) {
2057                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2058                         return b;
2059         }
2060 
2061         return NULL;
2062 }
2063 
2064 struct bdaddr_list_with_flags *
2065 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2066                                   bdaddr_t *bdaddr, u8 type)
2067 {
2068         struct bdaddr_list_with_flags *b;
2069 
2070         list_for_each_entry(b, bdaddr_list, list) {
2071                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2072                         return b;
2073         }
2074 
2075         return NULL;
2076 }
2077 
2078 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2079 {
2080         struct bdaddr_list *b, *n;
2081 
2082         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2083                 list_del(&b->list);
2084                 kfree(b);
2085         }
2086 }
2087 
2088 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2089 {
2090         struct bdaddr_list *entry;
2091 
2092         if (!bacmp(bdaddr, BDADDR_ANY))
2093                 return -EBADF;
2094 
2095         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2096                 return -EEXIST;
2097 
2098         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2099         if (!entry)
2100                 return -ENOMEM;
2101 
2102         bacpy(&entry->bdaddr, bdaddr);
2103         entry->bdaddr_type = type;
2104 
2105         list_add(&entry->list, list);
2106 
2107         return 0;
2108 }
2109 
2110 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2111                                         u8 type, u8 *peer_irk, u8 *local_irk)
2112 {
2113         struct bdaddr_list_with_irk *entry;
2114 
2115         if (!bacmp(bdaddr, BDADDR_ANY))
2116                 return -EBADF;
2117 
2118         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2119                 return -EEXIST;
2120 
2121         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2122         if (!entry)
2123                 return -ENOMEM;
2124 
2125         bacpy(&entry->bdaddr, bdaddr);
2126         entry->bdaddr_type = type;
2127 
2128         if (peer_irk)
2129                 memcpy(entry->peer_irk, peer_irk, 16);
2130 
2131         if (local_irk)
2132                 memcpy(entry->local_irk, local_irk, 16);
2133 
2134         list_add(&entry->list, list);
2135 
2136         return 0;
2137 }
2138 
2139 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2140                                    u8 type, u32 flags)
2141 {
2142         struct bdaddr_list_with_flags *entry;
2143 
2144         if (!bacmp(bdaddr, BDADDR_ANY))
2145                 return -EBADF;
2146 
2147         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2148                 return -EEXIST;
2149 
2150         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2151         if (!entry)
2152                 return -ENOMEM;
2153 
2154         bacpy(&entry->bdaddr, bdaddr);
2155         entry->bdaddr_type = type;
2156         entry->flags = flags;
2157 
2158         list_add(&entry->list, list);
2159 
2160         return 0;
2161 }
2162 
2163 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2164 {
2165         struct bdaddr_list *entry;
2166 
2167         if (!bacmp(bdaddr, BDADDR_ANY)) {
2168                 hci_bdaddr_list_clear(list);
2169                 return 0;
2170         }
2171 
2172         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2173         if (!entry)
2174                 return -ENOENT;
2175 
2176         list_del(&entry->list);
2177         kfree(entry);
2178 
2179         return 0;
2180 }
2181 
2182 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2183                                                         u8 type)
2184 {
2185         struct bdaddr_list_with_irk *entry;
2186 
2187         if (!bacmp(bdaddr, BDADDR_ANY)) {
2188                 hci_bdaddr_list_clear(list);
2189                 return 0;
2190         }
2191 
2192         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2193         if (!entry)
2194                 return -ENOENT;
2195 
2196         list_del(&entry->list);
2197         kfree(entry);
2198 
2199         return 0;
2200 }
2201 
2202 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2203                                    u8 type)
2204 {
2205         struct bdaddr_list_with_flags *entry;
2206 
2207         if (!bacmp(bdaddr, BDADDR_ANY)) {
2208                 hci_bdaddr_list_clear(list);
2209                 return 0;
2210         }
2211 
2212         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2213         if (!entry)
2214                 return -ENOENT;
2215 
2216         list_del(&entry->list);
2217         kfree(entry);
2218 
2219         return 0;
2220 }
2221 
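/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the bdaddr_list helpers above implement a small keyed set: add rejects
 * BDADDR_ANY (-EBADF) and duplicates (-EEXIST), while deleting
 * BDADDR_ANY clears the whole list. Hypothetical usage against the LE
 * accept list:
 */
static void __maybe_unused example_accept_list(struct hci_dev *hdev,
                                               bdaddr_t *peer)
{
        if (hci_bdaddr_list_add(&hdev->le_accept_list, peer,
                                ADDR_LE_DEV_PUBLIC) == -EEXIST)
                bt_dev_dbg(hdev, "%pMR already on accept list", peer);

        /* Remove one entry ... */
        hci_bdaddr_list_del(&hdev->le_accept_list, peer, ADDR_LE_DEV_PUBLIC);

        /* ... or clear the list entirely */
        hci_bdaddr_list_del(&hdev->le_accept_list, BDADDR_ANY, 0);
}
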
2222 /* This function requires the caller holds hdev->lock */
2223 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2224                                                bdaddr_t *addr, u8 addr_type)
2225 {
2226         struct hci_conn_params *params;
2227 
2228         list_for_each_entry(params, &hdev->le_conn_params, list) {
2229                 if (bacmp(&params->addr, addr) == 0 &&
2230                     params->addr_type == addr_type) {
2231                         return params;
2232                 }
2233         }
2234 
2235         return NULL;
2236 }
2237 
2238 /* This function requires the caller holds hdev->lock */
2239 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2240                                                   bdaddr_t *addr, u8 addr_type)
2241 {
2242         struct hci_conn_params *param;
2243 
2244         list_for_each_entry(param, list, action) {
2245                 if (bacmp(&param->addr, addr) == 0 &&
2246                     param->addr_type == addr_type)
2247                         return param;
2248         }
2249 
2250         return NULL;
2251 }
2252 
2253 /* This function requires the caller holds hdev->lock */
2254 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2255                                             bdaddr_t *addr, u8 addr_type)
2256 {
2257         struct hci_conn_params *params;
2258 
2259         params = hci_conn_params_lookup(hdev, addr, addr_type);
2260         if (params)
2261                 return params;
2262 
2263         params = kzalloc(sizeof(*params), GFP_KERNEL);
2264         if (!params) {
2265                 bt_dev_err(hdev, "out of memory");
2266                 return NULL;
2267         }
2268 
2269         bacpy(&params->addr, addr);
2270         params->addr_type = addr_type;
2271 
2272         list_add(&params->list, &hdev->le_conn_params);
2273         INIT_LIST_HEAD(&params->action);
2274 
2275         params->conn_min_interval = hdev->le_conn_min_interval;
2276         params->conn_max_interval = hdev->le_conn_max_interval;
2277         params->conn_latency = hdev->le_conn_latency;
2278         params->supervision_timeout = hdev->le_supv_timeout;
2279         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2280 
2281         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2282 
2283         return params;
2284 }
2285 
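/*
 * Editor's note (illustrative sketch, not part of the original file):
 * hci_conn_params_add() is idempotent, returning the existing entry when
 * one matches, so a hypothetical caller can fetch-or-create and then
 * adjust the connection policy, with hdev->lock held:
 */
static void __maybe_unused example_autoconnect(struct hci_dev *hdev,
                                               bdaddr_t *addr)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
        if (!params)
                return;

        params->auto_connect = HCI_AUTO_CONN_ALWAYS;

        /* Callers typically refresh passive scanning afterwards, as
         * hci_conn_params_del() does above.
         */
        hci_update_passive_scan(hdev);
}
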
2286 static void hci_conn_params_free(struct hci_conn_params *params)
2287 {
2288         if (params->conn) {
2289                 hci_conn_drop(params->conn);
2290                 hci_conn_put(params->conn);
2291         }
2292 
2293         list_del(&params->action);
2294         list_del(&params->list);
2295         kfree(params);
2296 }
2297 
2298 /* This function requires the caller holds hdev->lock */
2299 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2300 {
2301         struct hci_conn_params *params;
2302 
2303         params = hci_conn_params_lookup(hdev, addr, addr_type);
2304         if (!params)
2305                 return;
2306 
2307         hci_conn_params_free(params);
2308 
2309         hci_update_passive_scan(hdev);
2310 
2311         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2312 }
2313 
2314 /* This function requires the caller holds hdev->lock */
2315 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2316 {
2317         struct hci_conn_params *params, *tmp;
2318 
2319         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2320                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2321                         continue;
2322 
2323                 /* If we are trying a one-time connection to a disabled
2324                  * device, leave the params but mark them as one-shot.
2325                  */
2326                 if (params->explicit_connect) {
2327                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2328                         continue;
2329                 }
2330 
2331                 list_del(&params->list);
2332                 kfree(params);
2333         }
2334 
2335         BT_DBG("All LE disabled connection parameters were removed");
2336 }
2337 
2338 /* This function requires the caller holds hdev->lock */
2339 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2340 {
2341         struct hci_conn_params *params, *tmp;
2342 
2343         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2344                 hci_conn_params_free(params);
2345 
2346         BT_DBG("All LE connection parameters were removed");
2347 }
2348 
2349 /* Copy the Identity Address of the controller.
2350  *
2351  * If the controller has a public BD_ADDR, then by default use that one.
2352  * If this is an LE-only controller without a public address, default to
2353  * the static random address.
2354  *
2355  * For debugging purposes it is possible to force controllers with a
2356  * public address to use the static random address instead.
2357  *
2358  * In case BR/EDR has been disabled on a dual-mode controller and
2359  * userspace has configured a static address, then that address
2360  * becomes the identity address instead of the public BR/EDR address.
2361  */
2362 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2363                                u8 *bdaddr_type)
2364 {
2365         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2366             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2367             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2368              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2369                 bacpy(bdaddr, &hdev->static_addr);
2370                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2371         } else {
2372                 bacpy(bdaddr, &hdev->bdaddr);
2373                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2374         }
2375 }
2376 
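/*
 * Editor's note (illustrative sketch, not part of the original file):
 * typical use is filling in the local identity for an outgoing LE
 * operation; the helper also reports which address space the result
 * belongs to:
 */
static void __maybe_unused example_own_addr(struct hci_dev *hdev)
{
        bdaddr_t addr;
        u8 addr_type;

        hci_copy_identity_address(hdev, &addr, &addr_type);
        bt_dev_dbg(hdev, "identity %pMR (type %u)", &addr, addr_type);
}
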
2377 static void hci_clear_wake_reason(struct hci_dev *hdev)
2378 {
2379         hci_dev_lock(hdev);
2380 
2381         hdev->wake_reason = 0;
2382         bacpy(&hdev->wake_addr, BDADDR_ANY);
2383         hdev->wake_addr_type = 0;
2384 
2385         hci_dev_unlock(hdev);
2386 }
2387 
2388 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2389                                 void *data)
2390 {
2391         struct hci_dev *hdev =
2392                 container_of(nb, struct hci_dev, suspend_notifier);
2393         int ret = 0;
2394 
2395         if (action == PM_SUSPEND_PREPARE)
2396                 ret = hci_suspend_dev(hdev);
2397         else if (action == PM_POST_SUSPEND)
2398                 ret = hci_resume_dev(hdev);
2399 
2400         if (ret)
2401                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2402                            action, ret);
2403 
2404         return NOTIFY_DONE;
2405 }
2406 
2407 /* Alloc HCI device */
2408 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2409 {
2410         struct hci_dev *hdev;
2411         unsigned int alloc_size;
2412 
2413         alloc_size = sizeof(*hdev);
2414         if (sizeof_priv) {
2415                 /* Fixme: May need ALIGN-ment? */
2416                 alloc_size += sizeof_priv;
2417         }
2418 
2419         hdev = kzalloc(alloc_size, GFP_KERNEL);
2420         if (!hdev)
2421                 return NULL;
2422 
2423         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2424         hdev->esco_type = (ESCO_HV1);
2425         hdev->link_mode = (HCI_LM_ACCEPT);
2426         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2427         hdev->io_capability = 0x03;     /* No Input No Output */
2428         hdev->manufacturer = 0xffff;    /* Default to internal use */
2429         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2430         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2431         hdev->adv_instance_cnt = 0;
2432         hdev->cur_adv_instance = 0x00;
2433         hdev->adv_instance_timeout = 0;
2434 
2435         hdev->advmon_allowlist_duration = 300;
2436         hdev->advmon_no_filter_duration = 500;
2437         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2438 
2439         hdev->sniff_max_interval = 800;
2440         hdev->sniff_min_interval = 80;
2441 
2442         hdev->le_adv_channel_map = 0x07;
2443         hdev->le_adv_min_interval = 0x0800;
2444         hdev->le_adv_max_interval = 0x0800;
2445         hdev->le_scan_interval = 0x0060;
2446         hdev->le_scan_window = 0x0030;
2447         hdev->le_scan_int_suspend = 0x0400;
2448         hdev->le_scan_window_suspend = 0x0012;
2449         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2450         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2451         hdev->le_scan_int_adv_monitor = 0x0060;
2452         hdev->le_scan_window_adv_monitor = 0x0030;
2453         hdev->le_scan_int_connect = 0x0060;
2454         hdev->le_scan_window_connect = 0x0060;
2455         hdev->le_conn_min_interval = 0x0018;
2456         hdev->le_conn_max_interval = 0x0028;
2457         hdev->le_conn_latency = 0x0000;
2458         hdev->le_supv_timeout = 0x002a;
2459         hdev->le_def_tx_len = 0x001b;
2460         hdev->le_def_tx_time = 0x0148;
2461         hdev->le_max_tx_len = 0x001b;
2462         hdev->le_max_tx_time = 0x0148;
2463         hdev->le_max_rx_len = 0x001b;
2464         hdev->le_max_rx_time = 0x0148;
2465         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2466         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2467         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2468         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2469         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2470         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2471         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2472         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2473         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2474 
2475         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2476         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2477         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2478         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2479         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2480         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2481 
2482         /* default 1.28 sec page scan */
2483         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2484         hdev->def_page_scan_int = 0x0800;
2485         hdev->def_page_scan_window = 0x0012;
2486 
2487         mutex_init(&hdev->lock);
2488         mutex_init(&hdev->req_lock);
2489 
2490         INIT_LIST_HEAD(&hdev->mgmt_pending);
2491         INIT_LIST_HEAD(&hdev->reject_list);
2492         INIT_LIST_HEAD(&hdev->accept_list);
2493         INIT_LIST_HEAD(&hdev->uuids);
2494         INIT_LIST_HEAD(&hdev->link_keys);
2495         INIT_LIST_HEAD(&hdev->long_term_keys);
2496         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2497         INIT_LIST_HEAD(&hdev->remote_oob_data);
2498         INIT_LIST_HEAD(&hdev->le_accept_list);
2499         INIT_LIST_HEAD(&hdev->le_resolv_list);
2500         INIT_LIST_HEAD(&hdev->le_conn_params);
2501         INIT_LIST_HEAD(&hdev->pend_le_conns);
2502         INIT_LIST_HEAD(&hdev->pend_le_reports);
2503         INIT_LIST_HEAD(&hdev->conn_hash.list);
2504         INIT_LIST_HEAD(&hdev->adv_instances);
2505         INIT_LIST_HEAD(&hdev->blocked_keys);
2506 
2507         INIT_LIST_HEAD(&hdev->local_codecs);
2508         INIT_WORK(&hdev->rx_work, hci_rx_work);
2509         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2510         INIT_WORK(&hdev->tx_work, hci_tx_work);
2511         INIT_WORK(&hdev->power_on, hci_power_on);
2512         INIT_WORK(&hdev->error_reset, hci_error_reset);
2513 
2514         hci_cmd_sync_init(hdev);
2515 
2516         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2517 
2518         skb_queue_head_init(&hdev->rx_q);
2519         skb_queue_head_init(&hdev->cmd_q);
2520         skb_queue_head_init(&hdev->raw_q);
2521 
2522         init_waitqueue_head(&hdev->req_wait_q);
2523 
2524         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2525         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2526 
2527         hci_request_setup(hdev);
2528 
2529         hci_init_sysfs(hdev);
2530         discovery_init(hdev);
2531 
2532         return hdev;
2533 }
2534 EXPORT_SYMBOL(hci_alloc_dev_priv);
2535 
2536 /* Free HCI device */
2537 void hci_free_dev(struct hci_dev *hdev)
2538 {
2539         /* will free via device release */
2540         put_device(&hdev->dev);
2541 }
2542 EXPORT_SYMBOL(hci_free_dev);
2543 
2544 /* Register HCI device */
2545 int hci_register_dev(struct hci_dev *hdev)
2546 {
2547         int id, error;
2548 
2549         if (!hdev->open || !hdev->close || !hdev->send)
2550                 return -EINVAL;
2551 
2552         /* Do not allow HCI_AMP devices to register at index 0,
2553          * so the index can be used as the AMP controller ID.
2554          */
2555         switch (hdev->dev_type) {
2556         case HCI_PRIMARY:
2557                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2558                 break;
2559         case HCI_AMP:
2560                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2561                 break;
2562         default:
2563                 return -EINVAL;
2564         }
2565 
2566         if (id < 0)
2567                 return id;
2568 
2569         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2570         hdev->id = id;
2571 
2572         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2573 
2574         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2575         if (!hdev->workqueue) {
2576                 error = -ENOMEM;
2577                 goto err;
2578         }
2579 
2580         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2581                                                       hdev->name);
2582         if (!hdev->req_workqueue) {
2583                 destroy_workqueue(hdev->workqueue);
2584                 error = -ENOMEM;
2585                 goto err;
2586         }
2587 
2588         if (!IS_ERR_OR_NULL(bt_debugfs))
2589                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2590 
2591         dev_set_name(&hdev->dev, "%s", hdev->name);
2592 
2593         error = device_add(&hdev->dev);
2594         if (error < 0)
2595                 goto err_wqueue;
2596 
2597         hci_leds_init(hdev);
2598 
2599         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2600                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2601                                     hdev);
2602         if (hdev->rfkill) {
2603                 if (rfkill_register(hdev->rfkill) < 0) {
2604                         rfkill_destroy(hdev->rfkill);
2605                         hdev->rfkill = NULL;
2606                 }
2607         }
2608 
2609         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2610                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2611 
2612         hci_dev_set_flag(hdev, HCI_SETUP);
2613         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2614 
2615         if (hdev->dev_type == HCI_PRIMARY) {
2616         /* Assume BR/EDR support until proven otherwise (such as
2617          * through reading supported features during init).
2618          */
2619                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2620         }
2621 
2622         write_lock(&hci_dev_list_lock);
2623         list_add(&hdev->list, &hci_dev_list);
2624         write_unlock(&hci_dev_list_lock);
2625 
2626         /* Devices that are marked for raw-only usage are unconfigured
2627          * and should not be included in normal operation.
2628          */
2629         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2630                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2631 
2632         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2633          * callback.
2634          */
2635         if (hdev->wakeup)
2636                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2637 
2638         hci_sock_dev_event(hdev, HCI_DEV_REG);
2639         hci_dev_hold(hdev);
2640 
2641         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2642                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2643                 error = register_pm_notifier(&hdev->suspend_notifier);
2644                 if (error)
2645                         goto err_wqueue;
2646         }
2647 
2648         queue_work(hdev->req_workqueue, &hdev->power_on);
2649 
2650         idr_init(&hdev->adv_monitors_idr);
2651         msft_register(hdev);
2652 
2653         return id;
2654 
2655 err_wqueue:
2656         debugfs_remove_recursive(hdev->debugfs);
2657         destroy_workqueue(hdev->workqueue);
2658         destroy_workqueue(hdev->req_workqueue);
2659 err:
2660         ida_simple_remove(&hci_index_ida, hdev->id);
2661 
2662         return error;
2663 }
2664 EXPORT_SYMBOL(hci_register_dev);
2665 
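/*
 * Editor's note (illustrative sketch, not part of the original file):
 * minimal driver-side bring-up: allocate, fill in the three mandatory
 * callbacks (hci_register_dev() returns -EINVAL without open, close and
 * send), then register. The example_* stubs are hypothetical; a real
 * driver would talk to its hardware. dev_type is left at its kzalloc'ed
 * default of HCI_PRIMARY (0x00), and a driver passing a non-zero
 * sizeof_priv would reach its private area via the hci_get_priv()
 * helper from hci_core.h.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real driver would transmit the frame */
        return 0;
}

static int __maybe_unused example_driver_setup(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev_priv(0);   /* no driver-private area */
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;     /* the new controller index on success */
}
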
2666 /* Unregister HCI device */
2667 void hci_unregister_dev(struct hci_dev *hdev)
2668 {
2669         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2670 
2671         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2672 
2673         write_lock(&hci_dev_list_lock);
2674         list_del(&hdev->list);
2675         write_unlock(&hci_dev_list_lock);
2676 
2677         cancel_work_sync(&hdev->power_on);
2678 
2679         hci_cmd_sync_clear(hdev);
2680 
2681         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2682                 unregister_pm_notifier(&hdev->suspend_notifier);
2683 
2684         msft_unregister(hdev);
2685 
2686         hci_dev_do_close(hdev);
2687 
2688         if (!test_bit(HCI_INIT, &hdev->flags) &&
2689             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2690             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2691                 hci_dev_lock(hdev);
2692                 mgmt_index_removed(hdev);
2693                 hci_dev_unlock(hdev);
2694         }
2695 
2696         /* mgmt_index_removed should take care of emptying the
2697          * pending list */
2698         BUG_ON(!list_empty(&hdev->mgmt_pending));
2699 
2700         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2701 
2702         if (hdev->rfkill) {
2703                 rfkill_unregister(hdev->rfkill);
2704                 rfkill_destroy(hdev->rfkill);
2705         }
2706 
2707         device_del(&hdev->dev);
2708         /* Actual cleanup is deferred until hci_release_dev(). */
2709         hci_dev_put(hdev);
2710 }
2711 EXPORT_SYMBOL(hci_unregister_dev);
2712 
2713 /* Release HCI device */
2714 void hci_release_dev(struct hci_dev *hdev)
2715 {
2716         debugfs_remove_recursive(hdev->debugfs);
2717         kfree_const(hdev->hw_info);
2718         kfree_const(hdev->fw_info);
2719 
2720         destroy_workqueue(hdev->workqueue);
2721         destroy_workqueue(hdev->req_workqueue);
2722 
2723         hci_dev_lock(hdev);
2724         hci_bdaddr_list_clear(&hdev->reject_list);
2725         hci_bdaddr_list_clear(&hdev->accept_list);
2726         hci_uuids_clear(hdev);
2727         hci_link_keys_clear(hdev);
2728         hci_smp_ltks_clear(hdev);
2729         hci_smp_irks_clear(hdev);
2730         hci_remote_oob_data_clear(hdev);
2731         hci_adv_instances_clear(hdev);
2732         hci_adv_monitors_clear(hdev);
2733         hci_bdaddr_list_clear(&hdev->le_accept_list);
2734         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2735         hci_conn_params_clear_all(hdev);
2736         hci_discovery_filter_clear(hdev);
2737         hci_blocked_keys_clear(hdev);
2738         hci_dev_unlock(hdev);
2739 
2740         ida_simple_remove(&hci_index_ida, hdev->id);
2741         kfree_skb(hdev->sent_cmd);
2742         kfree(hdev);
2743 }
2744 EXPORT_SYMBOL(hci_release_dev);
2745 
2746 /* Suspend HCI device */
2747 int hci_suspend_dev(struct hci_dev *hdev)
2748 {
2749         int ret;
2750 
2751         bt_dev_dbg(hdev, "");
2752 
2753         /* Suspend should only act when the device is powered. */
2754         if (!hdev_is_powered(hdev) ||
2755             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2756                 return 0;
2757 
2758         /* If powering down, don't attempt to suspend */
2759         if (mgmt_powering_down(hdev))
2760                 return 0;
2761 
2762         hci_req_sync_lock(hdev);
2763         ret = hci_suspend_sync(hdev);
2764         hci_req_sync_unlock(hdev);
2765 
2766         hci_clear_wake_reason(hdev);
2767         mgmt_suspending(hdev, hdev->suspend_state);
2768 
2769         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2770         return ret;
2771 }
2772 EXPORT_SYMBOL(hci_suspend_dev);
2773 
2774 /* Resume HCI device */
2775 int hci_resume_dev(struct hci_dev *hdev)
2776 {
2777         int ret;
2778 
2779         bt_dev_dbg(hdev, "");
2780 
2781         /* Resume should only act when the device is powered. */
2782         if (!hdev_is_powered(hdev) ||
2783             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2784                 return 0;
2785 
2786         /* If powering down, don't attempt to resume */
2787         if (mgmt_powering_down(hdev))
2788                 return 0;
2789 
2790         hci_req_sync_lock(hdev);
2791         ret = hci_resume_sync(hdev);
2792         hci_req_sync_unlock(hdev);
2793 
2794         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2795                       hdev->wake_addr_type);
2796 
2797         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2798         return ret;
2799 }
2800 EXPORT_SYMBOL(hci_resume_dev);
2801 
2802 /* Reset HCI device */
2803 int hci_reset_dev(struct hci_dev *hdev)
2804 {
2805         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2806         struct sk_buff *skb;
2807 
2808         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
2809         if (!skb)
2810                 return -ENOMEM;
2811 
2812         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2813         skb_put_data(skb, hw_err, sizeof(hw_err));
2814 
2815         bt_dev_err(hdev, "Injecting HCI hardware error event");
2816 
2817         /* Send Hardware Error to upper stack */
2818         return hci_recv_frame(hdev, skb);
2819 }
2820 EXPORT_SYMBOL(hci_reset_dev);
2821 
2822 /* Receive frame from HCI drivers */
2823 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2824 {
2825         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2826                       && !test_bit(HCI_INIT, &hdev->flags))) {
2827                 kfree_skb(skb);
2828                 return -ENXIO;
2829         }
2830 
2831         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2832             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2833             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2834             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2835                 kfree_skb(skb);
2836                 return -EINVAL;
2837         }
2838 
2839         /* Incoming skb */
2840         bt_cb(skb)->incoming = 1;
2841 
2842         /* Time stamp */
2843         __net_timestamp(skb);
2844 
2845         skb_queue_tail(&hdev->rx_q, skb);
2846         queue_work(hdev->workqueue, &hdev->rx_work);
2847 
2848         return 0;
2849 }
2850 EXPORT_SYMBOL(hci_recv_frame);
2851 
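/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * hypothetical driver delivering a received HCI event to the core. The
 * packet type must be set before calling hci_recv_frame(), or the frame
 * is dropped with -EINVAL.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
                                                const void *buf, size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, buf, len);

        return hci_recv_frame(hdev, skb);       /* consumes the skb */
}
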
2852 /* Receive diagnostic message from HCI drivers */
2853 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2854 {
2855         /* Mark as diagnostic packet */
2856         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2857 
2858         /* Time stamp */
2859         __net_timestamp(skb);
2860 
2861         skb_queue_tail(&hdev->rx_q, skb);
2862         queue_work(hdev->workqueue, &hdev->rx_work);
2863 
2864         return 0;
2865 }
2866 EXPORT_SYMBOL(hci_recv_diag);
2867 
2868 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2869 {
2870         va_list vargs;
2871 
2872         va_start(vargs, fmt);
2873         kfree_const(hdev->hw_info);
2874         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2875         va_end(vargs);
2876 }
2877 EXPORT_SYMBOL(hci_set_hw_info);
2878 
2879 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2880 {
2881         va_list vargs;
2882 
2883         va_start(vargs, fmt);
2884         kfree_const(hdev->fw_info);
2885         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2886         va_end(vargs);
2887 }
2888 EXPORT_SYMBOL(hci_set_fw_info);
2889 
2890 /* ---- Interface to upper protocols ---- */
2891 
2892 int hci_register_cb(struct hci_cb *cb)
2893 {
2894         BT_DBG("%p name %s", cb, cb->name);
2895 
2896         mutex_lock(&hci_cb_list_lock);
2897         list_add_tail(&cb->list, &hci_cb_list);
2898         mutex_unlock(&hci_cb_list_lock);
2899 
2900         return 0;
2901 }
2902 EXPORT_SYMBOL(hci_register_cb);
2903 
2904 int hci_unregister_cb(struct hci_cb *cb)
2905 {
2906         BT_DBG("%p name %s", cb, cb->name);
2907 
2908         mutex_lock(&hci_cb_list_lock);
2909         list_del(&cb->list);
2910         mutex_unlock(&hci_cb_list_lock);
2911 
2912         return 0;
2913 }
2914 EXPORT_SYMBOL(hci_unregister_cb);
2915 
2916 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2917 {
2918         int err;
2919 
2920         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2921                skb->len);
2922 
2923         /* Time stamp */
2924         __net_timestamp(skb);
2925 
2926         /* Send copy to monitor */
2927         hci_send_to_monitor(hdev, skb);
2928 
2929         if (atomic_read(&hdev->promisc)) {
2930                 /* Send copy to the sockets */
2931                 hci_send_to_sock(hdev, skb);
2932         }
2933 
2934         /* Get rid of skb owner, prior to sending to the driver. */
2935         skb_orphan(skb);
2936 
2937         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2938                 kfree_skb(skb);
2939                 return -EINVAL;
2940         }
2941 
2942         err = hdev->send(hdev, skb);
2943         if (err < 0) {
2944                 bt_dev_err(hdev, "sending frame failed (%d)", err);
2945                 kfree_skb(skb);
2946                 return err;
2947         }
2948 
2949         return 0;
2950 }
2951 
2952 /* Send HCI command */
2953 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2954                  const void *param)
2955 {
2956         struct sk_buff *skb;
2957 
2958         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2959 
2960         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2961         if (!skb) {
2962                 bt_dev_err(hdev, "no memory for command");
2963                 return -ENOMEM;
2964         }
2965 
2966         /* Stand-alone HCI commands must be flagged as
2967          * single-command requests.
2968          */
2969         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2970 
2971         skb_queue_tail(&hdev->cmd_q, skb);
2972         queue_work(hdev->workqueue, &hdev->cmd_work);
2973 
2974         return 0;
2975 }
2976 
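/*
 * Editor's note (illustrative sketch, not part of the original file):
 * queuing a parameterless command (HCI_Reset) and a command carrying a
 * small parameter block. Completion is reported asynchronously through
 * the event path; the 0x0005 policy value (role switch + sniff) is an
 * example only.
 */
static void __maybe_unused example_send_cmds(struct hci_dev *hdev)
{
        __le16 policy = cpu_to_le16(0x0005);

        /* Parameterless command: HCI_Reset */
        if (hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL) < 0)
                return;

        /* Command with a 2-byte parameter block */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
                     &policy);
}
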
2977 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2978                    const void *param)
2979 {
2980         struct sk_buff *skb;
2981 
2982         if (hci_opcode_ogf(opcode) != 0x3f) {
2983                 /* A controller receiving a command shall respond with either
2984                  * a Command Status Event or a Command Complete Event.
2985                  * Therefore, all standard HCI commands must be sent via the
2986                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2987                  * Some vendors do not comply with this rule for vendor-specific
2988                  * commands and do not return any event. We want to support
2989                  * unresponded commands for such cases only.
2990                  */
2991                 bt_dev_err(hdev, "unresponded command not supported");
2992                 return -EINVAL;
2993         }
2994 
2995         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2996         if (!skb) {
2997                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
2998                            opcode);
2999                 return -ENOMEM;
3000         }
3001 
3002         hci_send_frame(hdev, skb);
3003 
3004         return 0;
3005 }
3006 EXPORT_SYMBOL(__hci_cmd_send);
3007 
3008 /* Get data from the previously sent command */
3009 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3010 {
3011         struct hci_command_hdr *hdr;
3012 
3013         if (!hdev->sent_cmd)
3014                 return NULL;
3015 
3016         hdr = (void *) hdev->sent_cmd->data;
3017 
3018         if (hdr->opcode != cpu_to_le16(opcode))
3019                 return NULL;
3020 
3021         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3022 
3023         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3024 }
3025 
3026 /* Send ACL data */
3027 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3028 {
3029         struct hci_acl_hdr *hdr;
3030         int len = skb->len;
3031 
3032         skb_push(skb, HCI_ACL_HDR_SIZE);
3033         skb_reset_transport_header(skb);
3034         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3035         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3036         hdr->dlen   = cpu_to_le16(len);
3037 }
3038 
3039 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3040                           struct sk_buff *skb, __u16 flags)
3041 {
3042         struct hci_conn *conn = chan->conn;
3043         struct hci_dev *hdev = conn->hdev;
3044         struct sk_buff *list;
3045 
3046         skb->len = skb_headlen(skb);
3047         skb->data_len = 0;
3048 
3049         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3050 
3051         switch (hdev->dev_type) {
3052         case HCI_PRIMARY:
3053                 hci_add_acl_hdr(skb, conn->handle, flags);
3054                 break;
3055         case HCI_AMP:
3056                 hci_add_acl_hdr(skb, chan->handle, flags);
3057                 break;
3058         default:
3059                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3060                 return;
3061         }
3062 
3063         list = skb_shinfo(skb)->frag_list;
3064         if (!list) {
3065                 /* Non-fragmented */
3066                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3067 
3068                 skb_queue_tail(queue, skb);
3069         } else {
3070                 /* Fragmented */
3071                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3072 
3073                 skb_shinfo(skb)->frag_list = NULL;
3074 
3075                 /* Queue all fragments atomically. We need to use spin_lock_bh
3076                  * here because of 6LoWPAN links, as there this function is
3077                  * called from softirq and using normal spin lock could cause
3078                  * deadlocks.
3079                  */
3080                 spin_lock_bh(&queue->lock);
3081 
3082                 __skb_queue_tail(queue, skb);
3083 
3084                 flags &= ~ACL_START;
3085                 flags |= ACL_CONT;
3086                 do {
3087                         skb = list; list = list->next;
3088 
3089                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3090                         hci_add_acl_hdr(skb, conn->handle, flags);
3091 
3092                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3093 
3094                         __skb_queue_tail(queue, skb);
3095                 } while (list);
3096 
3097                 spin_unlock_bh(&queue->lock);
3098         }
3099 }
3100 
3101 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3102 {
3103         struct hci_dev *hdev = chan->conn->hdev;
3104 
3105         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3106 
3107         hci_queue_acl(chan, &chan->data_q, skb, flags);
3108 
3109         queue_work(hdev->workqueue, &hdev->tx_work);
3110 }
3111 
3112 /* Send SCO data */
3113 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3114 {
3115         struct hci_dev *hdev = conn->hdev;
3116         struct hci_sco_hdr hdr;
3117 
3118         BT_DBG("%s len %d", hdev->name, skb->len);
3119 
3120         hdr.handle = cpu_to_le16(conn->handle);
3121         hdr.dlen   = skb->len;
3122 
3123         skb_push(skb, HCI_SCO_HDR_SIZE);
3124         skb_reset_transport_header(skb);
3125         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3126 
3127         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3128 
3129         skb_queue_tail(&conn->data_q, skb);
3130         queue_work(hdev->workqueue, &hdev->tx_work);
3131 }
3132 
3133 /* ---- HCI TX task (outgoing data) ---- */
3134 
3135 /* HCI Connection scheduler */
3136 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3137                                      int *quote)
3138 {
3139         struct hci_conn_hash *h = &hdev->conn_hash;
3140         struct hci_conn *conn = NULL, *c;
3141         unsigned int num = 0, min = ~0;
3142 
3143         /* We don't have to lock the device here. Connections are always
3144          * added and removed with the TX task disabled. */
3145 
3146         rcu_read_lock();
3147 
3148         list_for_each_entry_rcu(c, &h->list, list) {
3149                 if (c->type != type || skb_queue_empty(&c->data_q))
3150                         continue;
3151 
3152                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3153                         continue;
3154 
3155                 num++;
3156 
3157                 if (c->sent < min) {
3158                         min  = c->sent;
3159                         conn = c;
3160                 }
3161 
3162                 if (hci_conn_num(hdev, type) == num)
3163                         break;
3164         }
3165 
3166         rcu_read_unlock();
3167 
3168         if (conn) {
3169                 int cnt, q;
3170 
3171                 switch (conn->type) {
3172                 case ACL_LINK:
3173                         cnt = hdev->acl_cnt;
3174                         break;
3175                 case SCO_LINK:
3176                 case ESCO_LINK:
3177                         cnt = hdev->sco_cnt;
3178                         break;
3179                 case LE_LINK:
3180                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3181                         break;
3182                 default:
3183                         cnt = 0;
3184                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3185                 }
3186 
3187                 q = cnt / num;
3188                 *quote = q ? q : 1;
3189         } else
3190                 *quote = 0;
3191 
3192         BT_DBG("conn %p quote %d", conn, *quote);
3193         return conn;
3194 }
3195 
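/*
 * Editor's note (worked example, not part of the original file): per link
 * type, the scheduler above picks the connection with the fewest packets
 * in flight and grants it an equal share of the free controller buffers.
 * With three busy ACL connections and hdev->acl_cnt == 8, the chosen
 * connection gets *quote = 8 / 3 = 2; the "q ? q : 1" fallback ensures
 * forward progress even when cnt < num would round the share down to 0.
 */
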
3196 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3197 {
3198         struct hci_conn_hash *h = &hdev->conn_hash;
3199         struct hci_conn *c;
3200 
3201         bt_dev_err(hdev, "link tx timeout");
3202 
3203         rcu_read_lock();
3204 
3205         /* Kill stalled connections */
3206         list_for_each_entry_rcu(c, &h->list, list) {
3207                 if (c->type == type && c->sent) {
3208                         bt_dev_err(hdev, "killing stalled connection %pMR",
3209                                    &c->dst);
3210                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3211                 }
3212         }
3213 
3214         rcu_read_unlock();
3215 }
3216 
3217 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3218                                       int *quote)
3219 {
3220         struct hci_conn_hash *h = &hdev->conn_hash;
3221         struct hci_chan *chan = NULL;
3222         unsigned int num = 0, min = ~0, cur_prio = 0;
3223         struct hci_conn *conn;
3224         int cnt, q, conn_num = 0;
3225 
3226         BT_DBG("%s", hdev->name);
3227 
3228         rcu_read_lock();
3229 
3230         list_for_each_entry_rcu(conn, &h->list, list) {
3231                 struct hci_chan *tmp;
3232 
3233                 if (conn->type != type)
3234                         continue;
3235 
3236                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3237                         continue;
3238 
3239                 conn_num++;
3240 
3241                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3242                         struct sk_buff *skb;
3243 
3244                         if (skb_queue_empty(&tmp->data_q))
3245                                 continue;
3246 
3247                         skb = skb_peek(&tmp->data_q);
3248                         if (skb->priority < cur_prio)
3249                                 continue;
3250 
3251                         if (skb->priority > cur_prio) {
3252                                 num = 0;
3253                                 min = ~0;
3254                                 cur_prio = skb->priority;
3255                         }
3256 
3257                         num++;
3258 
3259                         if (conn->sent < min) {
3260                                 min  = conn->sent;
3261                                 chan = tmp;
3262                         }
3263                 }
3264 
3265                 if (hci_conn_num(hdev, type) == conn_num)
3266                         break;
3267         }
3268 
3269         rcu_read_unlock();
3270 
3271         if (!chan)
3272                 return NULL;
3273 
3274         switch (chan->conn->type) {
3275         case ACL_LINK:
3276                 cnt = hdev->acl_cnt;
3277                 break;
3278         case AMP_LINK:
3279                 cnt = hdev->block_cnt;
3280                 break;
3281         case SCO_LINK:
3282         case ESCO_LINK:
3283                 cnt = hdev->sco_cnt;
3284                 break;
3285         case LE_LINK:
3286                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3287                 break;
3288         default:
3289                 cnt = 0;
3290                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3291         }
3292 
3293         q = cnt / num;
3294         *quote = q ? q : 1;
3295         BT_DBG("chan %p quote %d", chan, *quote);
3296         return chan;
3297 }
3298 
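/* A sketch of the two-level choice hci_chan_sent() makes: the highest
 * head-of-queue skb priority wins first, and seeing a higher priority
 * discards the fairness state gathered at lower priorities, so only
 * channels at the top priority compete for the quota. Userspace model
 * with hypothetical values:
 */
#include <stdio.h>

int main(void)
{
        /* head-of-queue priority / conn->sent pairs, hypothetical */
        unsigned int prio[] = { 3, 5, 5, 4 };
        unsigned int sent[] = { 0, 6, 2, 1 };
        unsigned int cur_prio = 0, num = 0, min = ~0u, i;
        int pick = -1;

        for (i = 0; i < 4; i++) {
                if (prio[i] < cur_prio) /* below the best priority seen */
                        continue;
                if (prio[i] > cur_prio) {
                        num = 0;        /* restart fairness bookkeeping */
                        min = ~0u;
                        cur_prio = prio[i];
                }
                num++;
                if (sent[i] < min) {
                        min = sent[i];
                        pick = (int)i;
                }
        }
        /* picks index 2: priority 5 beats 3 and 4, sent 2 beats 6 */
        printf("picked chan %d (prio %u, sent %u)\n", pick, cur_prio, min);
        return 0;
}
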
3299 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3300 {
3301         struct hci_conn_hash *h = &hdev->conn_hash;
3302         struct hci_conn *conn;
3303         int num = 0;
3304 
3305         BT_DBG("%s", hdev->name);
3306 
3307         rcu_read_lock();
3308 
3309         list_for_each_entry_rcu(conn, &h->list, list) {
3310                 struct hci_chan *chan;
3311 
3312                 if (conn->type != type)
3313                         continue;
3314 
3315                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3316                         continue;
3317 
3318                 num++;
3319 
3320                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3321                         struct sk_buff *skb;
3322 
3323                         if (chan->sent) {
3324                                 chan->sent = 0;
3325                                 continue;
3326                         }
3327 
3328                         if (skb_queue_empty(&chan->data_q))
3329                                 continue;
3330 
3331                         skb = skb_peek(&chan->data_q);
3332                         if (skb->priority >= HCI_PRIO_MAX - 1)
3333                                 continue;
3334 
3335                         skb->priority = HCI_PRIO_MAX - 1;
3336 
3337                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3338                                skb->priority);
3339                 }
3340 
3341                 if (hci_conn_num(hdev, type) == num)
3342                         break;
3343         }
3344 
3345         rcu_read_unlock();
3346 
3347 }
3348 
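/* hci_prio_recalculate() runs after a round in which something was
 * sent: channels that transmitted get their per-round counter cleared,
 * while channels that queued data but sent nothing have their head skb
 * promoted to HCI_PRIO_MAX - 1, so lower-priority traffic cannot be
 * starved indefinitely. A minimal model, assuming the kernel's
 * HCI_PRIO_MAX of 8:
 */
#include <stdio.h>

#define HCI_PRIO_MAX 8

struct fake_chan { unsigned int sent; int queued; unsigned int head_prio; };

int main(void)
{
        struct fake_chan ch[] = {
                { 3, 1, 2 },    /* sent this round: just reset the counter */
                { 0, 1, 2 },    /* starved: head skb gets promoted */
                { 0, 0, 0 },    /* nothing queued: left alone */
        };
        int i;

        for (i = 0; i < 3; i++) {
                if (ch[i].sent) {
                        ch[i].sent = 0;
                        continue;
                }
                if (!ch[i].queued || ch[i].head_prio >= HCI_PRIO_MAX - 1)
                        continue;
                ch[i].head_prio = HCI_PRIO_MAX - 1;
                printf("chan %d promoted to %u\n", i, ch[i].head_prio);
        }
        return 0;       /* prints: chan 1 promoted to 7 */
}
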
3349 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3350 {
3351         /* Calculate count of blocks used by this packet */
3352         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3353 }
3354 
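/* A worked example of the block accounting above, with hypothetical
 * numbers: a 343-byte skb (339 bytes of payload after the 4-byte ACL
 * header) on a controller advertising 64-byte data blocks occupies
 * DIV_ROUND_UP(339, 64) = 6 blocks.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int skb_len = 343, hdr_size = 4, block_len = 64;

        printf("blocks = %d\n",
               DIV_ROUND_UP(skb_len - hdr_size, block_len));
        return 0;       /* prints: blocks = 6 */
}
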
3355 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3356 {
3357         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3358                 /* The ACL tx timeout must be longer than the maximum
3359                  * link supervision timeout (40.9 seconds). */
3360                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3361                                        HCI_ACL_TX_TIMEOUT))
3362                         hci_link_tx_to(hdev, ACL_LINK);
3363         }
3364 }
3365 
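/* The test above relies on time_after(), which compares jiffies
 * safely across counter wraparound by using a signed subtraction. A
 * userspace model of the check, with hypothetical values (the kernel
 * defines HCI_ACL_TX_TIMEOUT as 45 seconds, just above the 40.9 second
 * supervision timeout):
 */
#include <stdio.h>

#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long jiffies = 1000000;        /* "now", at HZ=1000 */
        unsigned long acl_last_tx = 958000;     /* last ACL tx, 42 s ago */
        unsigned long acl_tx_timeout = 45000;   /* 45 s worth of jiffies */
        int cnt = 0;                            /* no buffers came back */

        if (!cnt && time_after(jiffies, acl_last_tx + acl_tx_timeout))
                printf("ACL tx timeout, kill stalled links\n");
        else
                printf("still within the timeout window\n");
        return 0;       /* prints: still within the timeout window */
}
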
3366 /* Schedule SCO */
3367 static void hci_sched_sco(struct hci_dev *hdev)
3368 {
3369         struct hci_conn *conn;
3370         struct sk_buff *skb;
3371         int quote;
3372 
3373         BT_DBG("%s", hdev->name);
3374 
3375         if (!hci_conn_num(hdev, SCO_LINK))
3376                 return;
3377 
3378         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3379                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3380                         BT_DBG("skb %p len %d", skb, skb->len);
3381                         hci_send_frame(hdev, skb);
3382 
3383                         conn->sent++;
3384                         if (conn->sent == ~0)
3385                                 conn->sent = 0;
3386                 }
3387         }
3388 }
3389 
3390 static void hci_sched_esco(struct hci_dev *hdev)
3391 {
3392         struct hci_conn *conn;
3393         struct sk_buff *skb;
3394         int quote;
3395 
3396         BT_DBG("%s", hdev->name);
3397 
3398         if (!hci_conn_num(hdev, ESCO_LINK))
3399                 return;
3400 
3401         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3402                                                      &quote))) {
3403                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3404                         BT_DBG("skb %p len %d", skb, skb->len);
3405                         hci_send_frame(hdev, skb);
3406 
3407                         conn->sent++;
3408                         if (conn->sent == ~0)
3409                                 conn->sent = 0;
3410                 }
3411         }
3412 }
3413 
3414 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3415 {
3416         unsigned int cnt = hdev->acl_cnt;
3417         struct hci_chan *chan;
3418         struct sk_buff *skb;
3419         int quote;
3420 
3421         __check_timeout(hdev, cnt);
3422 
3423         while (hdev->acl_cnt &&
3424                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3425                 u32 priority = (skb_peek(&chan->data_q))->priority;
3426                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3427                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3428                                skb->len, skb->priority);
3429 
3430                         /* Stop if priority has changed */
3431                         if (skb->priority < priority)
3432                                 break;
3433 
3434                         skb = skb_dequeue(&chan->data_q);
3435 
3436                         hci_conn_enter_active_mode(chan->conn,
3437                                                    bt_cb(skb)->force_active);
3438 
3439                         hci_send_frame(hdev, skb);
3440                         hdev->acl_last_tx = jiffies;
3441 
3442                         hdev->acl_cnt--;
3443                         chan->sent++;
3444                         chan->conn->sent++;
3445 
3446                         /* Send pending SCO packets right away */
3447                         hci_sched_sco(hdev);
3448                         hci_sched_esco(hdev);
3449                 }
3450         }
3451 
3452         if (cnt != hdev->acl_cnt)
3453                 hci_prio_recalculate(hdev, ACL_LINK);
3454 }
3455 
3456 static void hci_sched_acl_blk(struct hci_dev *hdev)
3457 {
3458         unsigned int cnt = hdev->block_cnt;
3459         struct hci_chan *chan;
3460         struct sk_buff *skb;
3461         int quote;
3462         u8 type;
3463 
3464         __check_timeout(hdev, cnt);
3465 
3466         BT_DBG("%s", hdev->name);
3467 
3468         if (hdev->dev_type == HCI_AMP)
3469                 type = AMP_LINK;
3470         else
3471                 type = ACL_LINK;
3472 
3473         while (hdev->block_cnt > 0 &&
3474                (chan = hci_chan_sent(hdev, type, &quote))) {
3475                 u32 priority = (skb_peek(&chan->data_q))->priority;
3476                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3477                         int blocks;
3478 
3479                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3480                                skb->len, skb->priority);
3481 
3482                         /* Stop if priority has changed */
3483                         if (skb->priority < priority)
3484                                 break;
3485 
3486                         skb = skb_dequeue(&chan->data_q);
3487 
3488                         blocks = __get_blocks(hdev, skb);
3489                         if (blocks > hdev->block_cnt)
3490                                 return;
3491 
3492                         hci_conn_enter_active_mode(chan->conn,
3493                                                    bt_cb(skb)->force_active);
3494 
3495                         hci_send_frame(hdev, skb);
3496                         hdev->acl_last_tx = jiffies;
3497 
3498                         hdev->block_cnt -= blocks;
3499                         quote -= blocks;
3500 
3501                         chan->sent += blocks;
3502                         chan->conn->sent += blocks;
3503                 }
3504         }
3505 
3506         if (cnt != hdev->block_cnt)
3507                 hci_prio_recalculate(hdev, type);
3508 }
3509 
3510 static void hci_sched_acl(struct hci_dev *hdev)
3511 {
3512         BT_DBG("%s", hdev->name);
3513 
3514         /* No ACL connections on a BR/EDR controller, nothing to schedule */
3515         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3516                 return;
3517 
3518         /* No AMP connections on an AMP controller, nothing to schedule */
3519         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3520                 return;
3521 
3522         switch (hdev->flow_ctl_mode) {
3523         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3524                 hci_sched_acl_pkt(hdev);
3525                 break;
3526 
3527         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3528                 hci_sched_acl_blk(hdev);
3529                 break;
3530         }
3531 }
3532 
3533 static void hci_sched_le(struct hci_dev *hdev)
3534 {
3535         struct hci_chan *chan;
3536         struct sk_buff *skb;
3537         int quote, cnt, tmp;
3538 
3539         BT_DBG("%s", hdev->name);
3540 
3541         if (!hci_conn_num(hdev, LE_LINK))
3542                 return;
3543 
3544         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3545 
3546         __check_timeout(hdev, cnt);
3547 
3548         tmp = cnt;
3549         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3550                 u32 priority = (skb_peek(&chan->data_q))->priority;
3551                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3552                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3553                                skb->len, skb->priority);
3554 
3555                         /* Stop if priority has changed */
3556                         if (skb->priority < priority)
3557                                 break;
3558 
3559                         skb = skb_dequeue(&chan->data_q);
3560 
3561                         hci_send_frame(hdev, skb);
3562                         hdev->le_last_tx = jiffies;
3563 
3564                         cnt--;
3565                         chan->sent++;
3566                         chan->conn->sent++;
3567 
3568                         /* Send pending SCO packets right away */
3569                         hci_sched_sco(hdev);
3570                         hci_sched_esco(hdev);
3571                 }
3572         }
3573 
3574         if (hdev->le_pkts)
3575                 hdev->le_cnt = cnt;
3576         else
3577                 hdev->acl_cnt = cnt;
3578 
3579         if (cnt != tmp)
3580                 hci_prio_recalculate(hdev, LE_LINK);
3581 }
3582 
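/* hci_sched_le() may draw from the shared ACL pool: when the
 * controller reports no dedicated LE buffers (le_pkts == 0), LE
 * traffic consumes ACL credits, and whatever is left after the round
 * is written back to the pool it came from. A minimal model with
 * hypothetical numbers:
 */
#include <stdio.h>

int main(void)
{
        int le_pkts = 0;                /* no dedicated LE buffers */
        int le_cnt = 0, acl_cnt = 8;
        int cnt = le_pkts ? le_cnt : acl_cnt;

        cnt -= 3;                       /* pretend three LE frames went out */

        if (le_pkts)
                le_cnt = cnt;
        else
                acl_cnt = cnt;          /* LE traffic used ACL credits */

        printf("acl_cnt %d le_cnt %d\n", acl_cnt, le_cnt);
        return 0;       /* prints: acl_cnt 5 le_cnt 0 */
}
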
3583 static void hci_tx_work(struct work_struct *work)
3584 {
3585         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3586         struct sk_buff *skb;
3587 
3588         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3589                hdev->sco_cnt, hdev->le_cnt);
3590 
3591         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3592                 /* Schedule queues and send stuff to HCI driver */
3593                 hci_sched_sco(hdev);
3594                 hci_sched_esco(hdev);
3595                 hci_sched_acl(hdev);
3596                 hci_sched_le(hdev);
3597         }
3598 
3599         /* Send any queued raw (unknown type) packets */
3600         while ((skb = skb_dequeue(&hdev->raw_q)))
3601                 hci_send_frame(hdev, skb);
3602 }
3603 
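/* The work handlers in this file recover their hci_dev from the
 * embedded work_struct via container_of(), which subtracts the
 * member's offset from the member's address. A tiny self-contained
 * model of the pattern (fake types, not the kernel's):
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct fake_dev { const char *name; struct work_struct tx_work; };

static void tx_work_fn(struct work_struct *work)
{
        struct fake_dev *dev = container_of(work, struct fake_dev, tx_work);

        printf("tx work ran for %s\n", dev->name);
}

int main(void)
{
        struct fake_dev dev = { .name = "hci0" };

        tx_work_fn(&dev.tx_work);       /* prints: tx work ran for hci0 */
        return 0;
}
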
3604 /* ----- HCI RX task (incoming data processing) ----- */
3605 
3606 /* ACL data packet */
3607 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3608 {
3609         struct hci_acl_hdr *hdr = (void *) skb->data;
3610         struct hci_conn *conn;
3611         __u16 handle, flags;
3612 
3613         skb_pull(skb, HCI_ACL_HDR_SIZE);
3614 
3615         handle = __le16_to_cpu(hdr->handle);
3616         flags  = hci_flags(handle);
3617         handle = hci_handle(handle);
3618 
3619         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3620                handle, flags);
3621 
3622         hdev->stat.acl_rx++;
3623 
3624         hci_dev_lock(hdev);
3625         conn = hci_conn_hash_lookup_handle(hdev, handle);
3626         hci_dev_unlock(hdev);
3627 
3628         if (conn) {
3629                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3630 
3631                 /* Send to upper protocol */
3632                 l2cap_recv_acldata(conn, skb, flags);
3633                 return;
3634         } else {
3635                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3636                            handle);
3637         }
3638 
3639         kfree_skb(skb);
3640 }
3641 
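/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with 4 flag bits (packet boundary and broadcast
 * flags). A sketch of the hci_handle()/hci_flags() split used above,
 * assuming their usual definitions (low 12 bits / high 4 bits):
 */
#include <stdio.h>
#include <stdint.h>

#define hci_handle(h) ((h) & 0x0fff)
#define hci_flags(h)  ((h) >> 12)

int main(void)
{
        uint16_t raw = 0x2042;          /* hypothetical on-the-wire value */

        printf("handle 0x%4.4x flags 0x%4.4x\n",
               (unsigned int)hci_handle(raw), (unsigned int)hci_flags(raw));
        return 0;       /* prints: handle 0x0042 flags 0x0002 */
}
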
3642 /* SCO data packet */
3643 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3644 {
3645         struct hci_sco_hdr *hdr = (void *) skb->data;
3646         struct hci_conn *conn;
3647         __u16 handle, flags;
3648 
3649         skb_pull(skb, HCI_SCO_HDR_SIZE);
3650 
3651         handle = __le16_to_cpu(hdr->handle);
3652         flags  = hci_flags(handle);
3653         handle = hci_handle(handle);
3654 
3655         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3656                handle, flags);
3657 
3658         hdev->stat.sco_rx++;
3659 
3660         hci_dev_lock(hdev);
3661         conn = hci_conn_hash_lookup_handle(hdev, handle);
3662         hci_dev_unlock(hdev);
3663 
3664         if (conn) {
3665                 /* Send to upper protocol */
3666                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3667                 sco_recv_scodata(conn, skb);
3668                 return;
3669         } else {
3670                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
3671                            handle);
3672         }
3673 
3674         kfree_skb(skb);
3675 }
3676 
3677 static bool hci_req_is_complete(struct hci_dev *hdev)
3678 {
3679         struct sk_buff *skb;
3680 
3681         skb = skb_peek(&hdev->cmd_q);
3682         if (!skb)
3683                 return true;
3684 
3685         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3686 }
3687 
3688 static void hci_resend_last(struct hci_dev *hdev)
3689 {
3690         struct hci_command_hdr *sent;
3691         struct sk_buff *skb;
3692         u16 opcode;
3693 
3694         if (!hdev->sent_cmd)
3695                 return;
3696 
3697         sent = (void *) hdev->sent_cmd->data;
3698         opcode = __le16_to_cpu(sent->opcode);
3699         if (opcode == HCI_OP_RESET)
3700                 return;
3701 
3702         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3703         if (!skb)
3704                 return;
3705 
3706         skb_queue_head(&hdev->cmd_q, skb);
3707         queue_work(hdev->workqueue, &hdev->cmd_work);
3708 }
3709 
3710 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3711                           hci_req_complete_t *req_complete,
3712                           hci_req_complete_skb_t *req_complete_skb)
3713 {
3714         struct sk_buff *skb;
3715         unsigned long flags;
3716 
3717         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3718 
3719         /* If the completed command doesn't match the last one that was
3720          * sent, we need to do special handling of it.
3721          */
3722         if (!hci_sent_cmd_data(hdev, opcode)) {
3723                 /* Some CSR-based controllers generate a spontaneous
3724                  * reset complete event during init, and any pending
3725                  * command will never be completed. In such a case we
3726                  * need to resend whatever was the last sent
3727                  * command.
3728                  */
3729                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3730                         hci_resend_last(hdev);
3731 
3732                 return;
3733         }
3734 
3735         /* If we reach this point this event matches the last command sent */
3736         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3737 
3738         /* If the command succeeded and there are still more commands in
3739          * this request, the request is not yet complete.
3740          */
3741         if (!status && !hci_req_is_complete(hdev))
3742                 return;
3743 
3744         /* If this was the last command in a request, the complete
3745          * callback would be found in hdev->sent_cmd instead of the
3746          * command queue (hdev->cmd_q).
3747          */
3748         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3749                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3750                 return;
3751         }
3752 
3753         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3754                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3755                 return;
3756         }
3757 
3758         /* Remove all pending commands belonging to this request */
3759         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3760         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3761                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3762                         __skb_queue_head(&hdev->cmd_q, skb);
3763                         break;
3764                 }
3765 
3766                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3767                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3768                 else
3769                         *req_complete = bt_cb(skb)->hci.req_complete;
3770                 kfree_skb(skb);
3771         }
3772         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3773 }
3774 
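/* A request is a run of queued commands whose first skb carries
 * HCI_REQ_START. The cleanup loop above drains the command queue up
 * to, but not including, the next start marker, which is put back at
 * the head. A minimal model of that walk, with a plain array standing
 * in for hdev->cmd_q:
 */
#include <stdio.h>

#define HCI_REQ_START 1 /* illustrative value, bit 0 as in the kernel */

int main(void)
{
        /* req_flags of queued commands: rest of the failed request,
         * then the first command of the next request */
        int flags[] = { 0, 0, HCI_REQ_START, 0 };
        int i;

        for (i = 0; i < 4; i++) {
                if (flags[i] & HCI_REQ_START)
                        break;          /* next request: leave it queued */
                printf("dropping pending command %d\n", i);
        }
        printf("next request starts at index %d\n", i);
        return 0;
}
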
3775 static void hci_rx_work(struct work_struct *work)
3776 {
3777         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3778         struct sk_buff *skb;
3779 
3780         BT_DBG("%s", hdev->name);
3781 
3782         while ((skb = skb_dequeue(&hdev->rx_q))) {
3783                 /* Send copy to monitor */
3784                 hci_send_to_monitor(hdev, skb);
3785 
3786                 if (atomic_read(&hdev->promisc)) {
3787                         /* Send copy to the sockets */
3788                         hci_send_to_sock(hdev, skb);
3789                 }
3790 
3791                 /* If the device has been opened in HCI_USER_CHANNEL,
3792                  * userspace has exclusive access to the device.
3793                  * While the device is in the HCI_INIT state, we still
3794                  * need to pass packets to the driver so that it can
3795                  * complete its setup().
3796                  */
3797                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3798                     !test_bit(HCI_INIT, &hdev->flags)) {
3799                         kfree_skb(skb);
3800                         continue;
3801                 }
3802 
3803                 if (test_bit(HCI_INIT, &hdev->flags)) {
3804                         /* Don't process data packets in these states. */
3805                         switch (hci_skb_pkt_type(skb)) {
3806                         case HCI_ACLDATA_PKT:
3807                         case HCI_SCODATA_PKT:
3808                         case HCI_ISODATA_PKT:
3809                                 kfree_skb(skb);
3810                                 continue;
3811                         }
3812                 }
3813 
3814                 /* Process frame */
3815                 switch (hci_skb_pkt_type(skb)) {
3816                 case HCI_EVENT_PKT:
3817                         BT_DBG("%s Event packet", hdev->name);
3818                         hci_event_packet(hdev, skb);
3819                         break;
3820 
3821                 case HCI_ACLDATA_PKT:
3822                         BT_DBG("%s ACL data packet", hdev->name);
3823                         hci_acldata_packet(hdev, skb);
3824                         break;
3825 
3826                 case HCI_SCODATA_PKT:
3827                         BT_DBG("%s SCO data packet", hdev->name);
3828                         hci_scodata_packet(hdev, skb);
3829                         break;
3830 
3831                 default:
3832                         kfree_skb(skb);
3833                         break;
3834                 }
3835         }
3836 }
3837 
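/* While HCI_INIT is set, only events matter: they drive the setup
 * sequence forward, while stray data packets are discarded before the
 * dispatch switch. A compact model of that filter (packet-type values
 * are illustrative, not the kernel's):
 */
#include <stdio.h>

enum pkt { EVENT_PKT, ACLDATA_PKT, SCODATA_PKT, ISODATA_PKT };

static const char *filter(int hci_init, enum pkt type)
{
        if (hci_init) {
                switch (type) {
                case ACLDATA_PKT:
                case SCODATA_PKT:
                case ISODATA_PKT:
                        return "dropped";
                default:
                        break;
                }
        }
        return "processed";
}

int main(void)
{
        printf("event during init: %s\n", filter(1, EVENT_PKT));
        printf("ACL data during init: %s\n", filter(1, ACLDATA_PKT));
        return 0;
}
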
3838 static void hci_cmd_work(struct work_struct *work)
3839 {
3840         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3841         struct sk_buff *skb;
3842 
3843         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3844                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3845 
3846         /* Send queued commands */
3847         if (atomic_read(&hdev->cmd_cnt)) {
3848                 skb = skb_dequeue(&hdev->cmd_q);
3849                 if (!skb)
3850                         return;
3851 
3852                 kfree_skb(hdev->sent_cmd);
3853 
3854                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3855                 if (hdev->sent_cmd) {
3856                         int res;
3857                         if (hci_req_status_pend(hdev))
3858                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3859                         atomic_dec(&hdev->cmd_cnt);
3860 
3861                         res = hci_send_frame(hdev, skb);
3862                         if (res < 0)
3863                                 __hci_cmd_sync_cancel(hdev, -res);
3864 
3865                         if (test_bit(HCI_RESET, &hdev->flags))
3866                                 cancel_delayed_work(&hdev->cmd_timer);
3867                         else
3868                                 schedule_delayed_work(&hdev->cmd_timer,
3869                                                       HCI_CMD_TIMEOUT);
3870                 } else {
3871                         skb_queue_head(&hdev->cmd_q, skb);
3872                         queue_work(hdev->workqueue, &hdev->cmd_work);
3873                 }
3874         }
3875 }
3876 
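/* hci_cmd_work() implements HCI command flow control: cmd_cnt holds
 * the number of commands the controller will accept right now (usually
 * one), it is decremented per command shipped and replenished from the
 * Num_HCI_Command_Packets field of Command Complete/Status events,
 * while the clone kept in sent_cmd lets the completion path match a
 * reply to the outstanding opcode. A minimal model of the credit
 * handling, with a simulated completion event:
 */
#include <stdio.h>

int main(void)
{
        int cmd_cnt = 1, queued = 3, sent = 0;

        while (queued) {
                if (!cmd_cnt) {
                        /* pretend a Command Complete event arrived and
                         * returned one credit */
                        cmd_cnt = 1;
                        printf("credit replenished by event\n");
                        continue;
                }
                cmd_cnt--;
                queued--;
                printf("sent command %d\n", ++sent);
        }
        return 0;
}
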
