
TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c


/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

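/* Example (illustrative sketch, not part of the original file): the
 * reference-counting contract of hci_dev_get(). A successful lookup
 * returns the device with its reference count raised, so every call
 * must be balanced by hci_dev_put(), exactly as the ioctl helpers
 * below do. example_with_hdev() is a hypothetical caller.
 */
static int example_with_hdev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        /* ... use hdev while the reference is held ... */

        hci_dev_put(hdev);
        return 0;
}
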
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

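/* Descriptive note: the flags returned above feed mgmt_device_found()
 * from the inquiry-result handlers (in hci_event.c, outside this
 * excerpt). MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm
 * whether name resolution is needed, and MGMT_DEV_FOUND_LEGACY_PAIRING
 * marks devices that do not support Secure Simple Pairing.
 */
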
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

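/* Example (userspace, illustrative; not part of the original file): the
 * HCIINQUIRY ioctl serviced by hci_inquiry() above. The request header
 * is followed in the same buffer by room for the inquiry_info results,
 * matching the copy_to_user() layout. A minimal standalone sketch
 * assuming the BlueZ <bluetooth/bluetooth.h> and <bluetooth/hci.h>
 * headers; 0x9e8b33 is the General Inquiry Access Code.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_inquiry(int dev_id)
{
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[8];
        } req;
        int dd, i;

        dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        if (dd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.ir.dev_id  = dev_id;
        req.ir.flags   = IREQ_CACHE_FLUSH;
        req.ir.lap[0]  = 0x33;          /* GIAC 0x9e8b33, little endian */
        req.ir.lap[1]  = 0x8b;
        req.ir.lap[2]  = 0x9e;
        req.ir.length  = 8;             /* 8 * 1.28s inquiry window */
        req.ir.num_rsp = 8;

        if (ioctl(dd, HCIINQUIRY, (unsigned long) &req) < 0) {
                close(dd);
                return -1;
        }

        for (i = 0; i < req.ir.num_rsp; i++) {
                const uint8_t *b = req.info[i].bdaddr.b;

                printf("found %02x:%02x:%02x:%02x:%02x:%02x\n",
                       b[5], b[4], b[3], b[2], b[1], b[0]);
        }

        close(dd);
        return req.ir.num_rsp;
}
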
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

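/* Example (userspace, illustrative; not part of the original file): the
 * HCIDEVUP/HCIDEVDOWN ioctls serviced by hci_dev_open() and
 * hci_dev_close() above. A minimal standalone sketch against the BlueZ
 * headers; note that these ioctls take the device index itself as the
 * argument, not a pointer. example_power() is a hypothetical helper.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_power(int dev_id, int up)
{
        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        int err;

        if (ctl < 0)
                return -1;

        err = ioctl(ctl, up ? HCIDEVUP : HCIDEVDOWN, dev_id);

        close(ctl);
        return err;
}
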
static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
        /* Wait for
         *
         *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
         *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
         *
         * inside RCU section to see the flag or complete scheduling.
         */
        synchronize_rcu();
        /* Explicitly cancel works in case scheduled after setting the flag. */
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

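/* Example (userspace, illustrative; not part of the original file):
 * issuing HCISETSCAN through hci_dev_cmd() above, plus the dev_opt
 * packing expected by HCISETACLMTU (packet count in the first __u16,
 * MTU in the second, mirroring the pointer casts in the handler).
 * Standalone sketch against the BlueZ headers; the MTU values are
 * arbitrary examples.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_set_scan(int dev_id)
{
        struct hci_dev_req dr;
        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (ctl < 0)
                return -1;

        memset(&dr, 0, sizeof(dr));
        dr.dev_id  = dev_id;
        dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY; /* connectable + discoverable */

        if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
                goto fail;

        /* HCISETACLMTU reads two 16-bit values packed into dev_opt */
        *((uint16_t *) &dr.dev_opt + 0) = 8;    /* ACL packet count */
        *((uint16_t *) &dr.dev_opt + 1) = 1021; /* ACL MTU */

        if (ioctl(ctl, HCISETACLMTU, (unsigned long) &dr) < 0)
                goto fail;

        close(ctl);
        return 0;

fail:
        close(ctl);
        return -1;
}
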
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

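/* Example (userspace, illustrative; not part of the original file):
 * enumerating controllers through HCIGETDEVLIST as implemented above.
 * dev_num is read first by the kernel, so it must be initialized to the
 * capacity of the trailing dev_req array. Standalone sketch against the
 * BlueZ headers, which define HCI_MAX_DEV.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_list_devices(void)
{
        struct hci_dev_list_req *dl;
        int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (ctl < 0)
                return -1;

        dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
        if (!dl) {
                close(ctl);
                return -1;
        }
        dl->dev_num = HCI_MAX_DEV;

        if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
                for (i = 0; i < dl->dev_num; i++)
                        printf("hci%u: flags 0x%08x\n",
                               (unsigned int) dl->dev_req[i].dev_id,
                               (unsigned int) dl->dev_req[i].dev_opt);

        free(dl);
        close(ctl);
        return 0;
}
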
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

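/* Descriptive note: these ops are wired up when the controller is
 * registered; hci_register_dev() (later in this file, outside this
 * excerpt) passes &hci_rfkill_ops to rfkill_alloc() and then calls
 * rfkill_register(), so an rfkill soft-block ends up in
 * hci_rfkill_set_block() above.
 */
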
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (hci_dev_do_close(hdev))
                return;

        hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key, *tmp;

        list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b, *tmp;

        list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as a requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as a requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as a requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

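/* Worked example of the rules above (illustrative): an unauthenticated
 * P-192 combination key (HCI_LK_UNAUTH_COMBINATION_P192, 0x04) is stored
 * persistently when either side required dedicated bonding (auth
 * requirements 0x02/0x03). If instead both conn->auth_type and
 * conn->remote_auth indicate no bonding (0x00/0x01), every rule falls
 * through and the key is kept only for the lifetime of the connection.
 */
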
static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k;
        int removed = 0;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k;

        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                else
                        data->present = 0x00;
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

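/* Descriptive note: the resulting data->present value is a bitmask
 * consumed by the pairing code: 0x00 = no valid OOB data, 0x01 = P-192
 * values only, 0x02 = P-256 values only, 0x03 = both P-192 and P-256
 * values present.
 */
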
1612 /* This function requires the caller holds hdev->lock */
1613 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1614 {
1615         struct adv_info *adv_instance;
1616 
1617         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1618                 if (adv_instance->instance == instance)
1619                         return adv_instance;
1620         }
1621 
1622         return NULL;
1623 }
1624 
1625 /* This function requires the caller holds hdev->lock */
1626 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1627 {
1628         struct adv_info *cur_instance;
1629 
1630         cur_instance = hci_find_adv_instance(hdev, instance);
1631         if (!cur_instance)
1632                 return NULL;
1633 
1634         if (cur_instance == list_last_entry(&hdev->adv_instances,
1635                                             struct adv_info, list))
1636                 return list_first_entry(&hdev->adv_instances,
1637                                                  struct adv_info, list);
1638         else
1639                 return list_next_entry(cur_instance, list);
1640 }
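
     /* hci_get_next_instance() treats the instance list as a ring: the entry
      * after the last one is the first again. A sketch of the rotation step
      * that can be built on top of it (illustrative only):
      *
      *	struct adv_info *next;
      *
      *	next = hci_get_next_instance(hdev, hdev->cur_adv_instance);
      *	if (next)
      *		hdev->cur_adv_instance = next->instance;
      */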
1641 
1642 /* This function requires the caller holds hdev->lock */
1643 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1644 {
1645         struct adv_info *adv_instance;
1646 
1647         adv_instance = hci_find_adv_instance(hdev, instance);
1648         if (!adv_instance)
1649                 return -ENOENT;
1650 
1651                 BT_DBG("%s removing %d", hdev->name, instance);
1652 
1653         if (hdev->cur_adv_instance == instance) {
1654                 if (hdev->adv_instance_timeout) {
1655                         cancel_delayed_work(&hdev->adv_instance_expire);
1656                         hdev->adv_instance_timeout = 0;
1657                 }
1658                 hdev->cur_adv_instance = 0x00;
1659         }
1660 
1661         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1662 
1663         list_del(&adv_instance->list);
1664         kfree(adv_instance);
1665 
1666         hdev->adv_instance_cnt--;
1667 
1668         return 0;
1669 }
1670 
1671 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1672 {
1673         struct adv_info *adv_instance, *n;
1674 
1675         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1676                 adv_instance->rpa_expired = rpa_expired;
1677 }
1678 
1679 /* This function requires the caller holds hdev->lock */
1680 void hci_adv_instances_clear(struct hci_dev *hdev)
1681 {
1682         struct adv_info *adv_instance, *n;
1683 
1684         if (hdev->adv_instance_timeout) {
1685                 cancel_delayed_work(&hdev->adv_instance_expire);
1686                 hdev->adv_instance_timeout = 0;
1687         }
1688 
1689         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1690                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1691                 list_del(&adv_instance->list);
1692                 kfree(adv_instance);
1693         }
1694 
1695         hdev->adv_instance_cnt = 0;
1696         hdev->cur_adv_instance = 0x00;
1697 }
1698 
1699 static void adv_instance_rpa_expired(struct work_struct *work)
1700 {
1701         struct adv_info *adv_instance = container_of(work, struct adv_info,
1702                                                      rpa_expired_cb.work);
1703 
1704         BT_DBG("");
1705 
1706         adv_instance->rpa_expired = true;
1707 }
1708 
1709 /* This function requires the caller holds hdev->lock */
1710 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1711                          u16 adv_data_len, u8 *adv_data,
1712                          u16 scan_rsp_len, u8 *scan_rsp_data,
1713                          u16 timeout, u16 duration, s8 tx_power,
1714                          u32 min_interval, u32 max_interval)
1715 {
1716         struct adv_info *adv_instance;
1717 
1718         adv_instance = hci_find_adv_instance(hdev, instance);
1719         if (adv_instance) {
1720                 memset(adv_instance->adv_data, 0,
1721                        sizeof(adv_instance->adv_data));
1722                 memset(adv_instance->scan_rsp_data, 0,
1723                        sizeof(adv_instance->scan_rsp_data));
1724         } else {
1725                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1726                     instance < 1 || instance > hdev->le_num_of_adv_sets)
1727                         return -EOVERFLOW;
1728 
1729                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1730                 if (!adv_instance)
1731                         return -ENOMEM;
1732 
1733                 adv_instance->pending = true;
1734                 adv_instance->instance = instance;
1735                 list_add(&adv_instance->list, &hdev->adv_instances);
1736                 hdev->adv_instance_cnt++;
1737         }
1738 
1739         adv_instance->flags = flags;
1740         adv_instance->adv_data_len = adv_data_len;
1741         adv_instance->scan_rsp_len = scan_rsp_len;
1742         adv_instance->min_interval = min_interval;
1743         adv_instance->max_interval = max_interval;
1744         adv_instance->tx_power = tx_power;
1745 
1746         if (adv_data_len)
1747                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1748 
1749         if (scan_rsp_len)
1750                 memcpy(adv_instance->scan_rsp_data,
1751                        scan_rsp_data, scan_rsp_len);
1752 
1753         adv_instance->timeout = timeout;
1754         adv_instance->remaining_time = timeout;
1755 
1756         if (duration == 0)
1757                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1758         else
1759                 adv_instance->duration = duration;
1760 
1761         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1762                           adv_instance_rpa_expired);
1763 
1764         BT_DBG("%s for %d", hdev->name, instance);
1765 
1766         return 0;
1767 }
1768 
1769 /* This function requires the caller holds hdev->lock */
1770 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1771                               u16 adv_data_len, u8 *adv_data,
1772                               u16 scan_rsp_len, u8 *scan_rsp_data)
1773 {
1774         struct adv_info *adv_instance;
1775 
1776         adv_instance = hci_find_adv_instance(hdev, instance);
1777 
1778         /* If advertisement doesn't exist, we can't modify its data */
1779         if (!adv_instance)
1780                 return -ENOENT;
1781 
1782         if (adv_data_len) {
1783                 memset(adv_instance->adv_data, 0,
1784                        sizeof(adv_instance->adv_data));
1785                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1786                 adv_instance->adv_data_len = adv_data_len;
1787         }
1788 
1789         if (scan_rsp_len) {
1790                 memset(adv_instance->scan_rsp_data, 0,
1791                        sizeof(adv_instance->scan_rsp_data));
1792                 memcpy(adv_instance->scan_rsp_data,
1793                        scan_rsp_data, scan_rsp_len);
1794                 adv_instance->scan_rsp_len = scan_rsp_len;
1795         }
1796 
1797         return 0;
1798 }
1799 
1800 /* This function requires the caller holds hdev->lock */
1801 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1802 {
1803         u32 flags;
1804         struct adv_info *adv;
1805 
1806         if (instance == 0x00) {
1807                 /* Instance 0 always manages the "Tx Power" and "Flags"
1808                  * fields
1809                  */
1810                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1811 
1812                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1813                  * corresponds to the "connectable" instance flag.
1814                  */
1815                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1816                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1817 
1818                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1819                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1820                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1821                         flags |= MGMT_ADV_FLAG_DISCOV;
1822 
1823                 return flags;
1824         }
1825 
1826         adv = hci_find_adv_instance(hdev, instance);
1827 
1828         /* Return 0 when given an invalid instance identifier. */
1829         if (!adv)
1830                 return 0;
1831 
1832         return adv->flags;
1833 }
1834 
1835 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1836 {
1837         struct adv_info *adv;
1838 
1839         /* Instance 0x00 always sets the local name */
1840         if (instance == 0x00)
1841                 return true;
1842 
1843         adv = hci_find_adv_instance(hdev, instance);
1844         if (!adv)
1845                 return false;
1846 
1847         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1848             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1849                 return true;
1850 
1851         return adv->scan_rsp_len ? true : false;
1852 }
1853 
1854 /* This function requires the caller holds hdev->lock */
1855 void hci_adv_monitors_clear(struct hci_dev *hdev)
1856 {
1857         struct adv_monitor *monitor;
1858         int handle;
1859 
1860         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1861                 hci_free_adv_monitor(hdev, monitor);
1862 
1863         idr_destroy(&hdev->adv_monitors_idr);
1864 }
1865 
1866 /* Frees the monitor structure and does some bookkeeping.
1867  * This function requires the caller holds hdev->lock.
1868  */
1869 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1870 {
1871         struct adv_pattern *pattern;
1872         struct adv_pattern *tmp;
1873 
1874         if (!monitor)
1875                 return;
1876 
1877         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1878                 list_del(&pattern->list);
1879                 kfree(pattern);
1880         }
1881 
1882         if (monitor->handle)
1883                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1884 
1885         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1886                 hdev->adv_monitors_cnt--;
1887                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1888         }
1889 
1890         kfree(monitor);
1891 }
1892 
1893 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1894 {
1895         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1896 }
1897 
1898 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1899 {
1900         return mgmt_remove_adv_monitor_complete(hdev, status);
1901 }
1902 
1903 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1904  * also attempts to forward the request to the controller.
1905  * Returns true if request is forwarded (result is pending), false otherwise.
1906  * This function requires the caller holds hdev->lock.
1907  */
1908 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1909                          int *err)
1910 {
1911         int min, max, handle;
1912 
1913         *err = 0;
1914 
1915         if (!monitor) {
1916                 *err = -EINVAL;
1917                 return false;
1918         }
1919 
1920         min = HCI_MIN_ADV_MONITOR_HANDLE;
1921         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1922         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1923                            GFP_KERNEL);
1924         if (handle < 0) {
1925                 *err = handle;
1926                 return false;
1927         }
1928 
1929         monitor->handle = handle;
1930 
1931         if (!hdev_is_powered(hdev))
1932                 return false;
1933 
1934         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1935         case HCI_ADV_MONITOR_EXT_NONE:
1936                 hci_update_passive_scan(hdev);
1937                 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
1938                 /* Message was not forwarded to controller - not an error */
1939                 return false;
1940         case HCI_ADV_MONITOR_EXT_MSFT:
1941                 *err = msft_add_monitor_pattern(hdev, monitor);
1942                 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
1943                            *err);
1944                 break;
1945         }
1946 
1947         return (*err == 0);
1948 }
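
     /* Callers must consult both the return value and *err: true means the
      * request went to the controller and the result is still pending; false
      * with *err == 0 means everything completed synchronously; false with a
      * negative *err means failure. One plausible caller pattern (a sketch,
      * not copied from the real mgmt caller):
      *
      *	int err;
      *	bool pending = hci_add_adv_monitor(hdev, monitor, &err);
      *
      *	if (err)
      *		return err;	// request failed
      *	if (pending)
      *		return 0;	// wait for the *_complete callback
      *	// completed synchronously, reply to userspace immediately
      */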
1949 
1950 /* Attempts to tell the controller to remove the monitor, then frees it.
1951  * If the controller somehow has no corresponding handle, it is freed anyway.
1952  * Returns true if request is forwarded (result is pending), false otherwise.
1953  * This function requires the caller holds hdev->lock.
1954  */
1955 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1956                                    struct adv_monitor *monitor,
1957                                    u16 handle, int *err)
1958 {
1959         *err = 0;
1960 
1961         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1962         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1963                 goto free_monitor;
1964         case HCI_ADV_MONITOR_EXT_MSFT:
1965                 *err = msft_remove_monitor(hdev, monitor, handle);
1966                 break;
1967         }
1968 
1969         /* In case no matching handle registered, just free the monitor */
1970         if (*err == -ENOENT)
1971                 goto free_monitor;
1972 
1973         return (*err == 0);
1974 
1975 free_monitor:
1976         if (*err == -ENOENT)
1977                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1978                             monitor->handle);
1979         hci_free_adv_monitor(hdev, monitor);
1980 
1981         *err = 0;
1982         return false;
1983 }
1984 
1985 /* Returns true if request is forwarded (result is pending), false otherwise.
1986  * This function requires the caller holds hdev->lock.
1987  */
1988 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1989 {
1990         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1991         bool pending;
1992 
1993         if (!monitor) {
1994                 *err = -EINVAL;
1995                 return false;
1996         }
1997 
1998         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1999         if (!*err && !pending)
2000                 hci_update_passive_scan(hdev);
2001 
2002         bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
2003                    hdev->name, handle, *err, pending ? "" : "not ");
2004 
2005         return pending;
2006 }
2007 
2008 /* Returns true if request is forwarded (result is pending), false otherwise.
2009  * This function requires the caller holds hdev->lock.
2010  */
2011 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
2012 {
2013         struct adv_monitor *monitor;
2014         int idr_next_id = 0;
2015         bool pending = false;
2016         bool update = false;
2017 
2018         *err = 0;
2019 
2020         while (!*err && !pending) {
2021                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2022                 if (!monitor)
2023                         break;
2024 
2025                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2026 
2027                 if (!*err && !pending)
2028                         update = true;
2029         }
2030 
2031         if (update)
2032                 hci_update_passive_scan(hdev);
2033 
2034         bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
2035                    hdev->name, *err, pending ? "" : "not ");
2036 
2037         return pending;
2038 }
2039 
2040 /* This function requires the caller holds hdev->lock */
2041 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2042 {
2043         return !idr_is_empty(&hdev->adv_monitors_idr);
2044 }
2045 
2046 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2047 {
2048         if (msft_monitor_supported(hdev))
2049                 return HCI_ADV_MONITOR_EXT_MSFT;
2050 
2051         return HCI_ADV_MONITOR_EXT_NONE;
2052 }
2053 
2054 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2055                                          bdaddr_t *bdaddr, u8 type)
2056 {
2057         struct bdaddr_list *b;
2058 
2059         list_for_each_entry(b, bdaddr_list, list) {
2060                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2061                         return b;
2062         }
2063 
2064         return NULL;
2065 }
2066 
2067 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2068                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2069                                 u8 type)
2070 {
2071         struct bdaddr_list_with_irk *b;
2072 
2073         list_for_each_entry(b, bdaddr_list, list) {
2074                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2075                         return b;
2076         }
2077 
2078         return NULL;
2079 }
2080 
2081 struct bdaddr_list_with_flags *
2082 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2083                                   bdaddr_t *bdaddr, u8 type)
2084 {
2085         struct bdaddr_list_with_flags *b;
2086 
2087         list_for_each_entry(b, bdaddr_list, list) {
2088                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2089                         return b;
2090         }
2091 
2092         return NULL;
2093 }
2094 
2095 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2096 {
2097         struct bdaddr_list *b, *n;
2098 
2099         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2100                 list_del(&b->list);
2101                 kfree(b);
2102         }
2103 }
2104 
2105 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2106 {
2107         struct bdaddr_list *entry;
2108 
2109         if (!bacmp(bdaddr, BDADDR_ANY))
2110                 return -EBADF;
2111 
2112         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2113                 return -EEXIST;
2114 
2115         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2116         if (!entry)
2117                 return -ENOMEM;
2118 
2119         bacpy(&entry->bdaddr, bdaddr);
2120         entry->bdaddr_type = type;
2121 
2122         list_add(&entry->list, list);
2123 
2124         return 0;
2125 }
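
     /* A usage sketch for the plain bdaddr list helpers, e.g. adding a device
      * to the LE accept list (error handling abbreviated, for illustration):
      *
      *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
      *				  ADDR_LE_DEV_PUBLIC);
      *	if (err && err != -EEXIST)
      *		return err;	// -EEXIST just means it was already there
      */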
2126 
2127 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2128                                         u8 type, u8 *peer_irk, u8 *local_irk)
2129 {
2130         struct bdaddr_list_with_irk *entry;
2131 
2132         if (!bacmp(bdaddr, BDADDR_ANY))
2133                 return -EBADF;
2134 
2135         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2136                 return -EEXIST;
2137 
2138         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2139         if (!entry)
2140                 return -ENOMEM;
2141 
2142         bacpy(&entry->bdaddr, bdaddr);
2143         entry->bdaddr_type = type;
2144 
2145         if (peer_irk)
2146                 memcpy(entry->peer_irk, peer_irk, 16);
2147 
2148         if (local_irk)
2149                 memcpy(entry->local_irk, local_irk, 16);
2150 
2151         list_add(&entry->list, list);
2152 
2153         return 0;
2154 }
2155 
2156 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2157                                    u8 type, u32 flags)
2158 {
2159         struct bdaddr_list_with_flags *entry;
2160 
2161         if (!bacmp(bdaddr, BDADDR_ANY))
2162                 return -EBADF;
2163 
2164         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2165                 return -EEXIST;
2166 
2167         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2168         if (!entry)
2169                 return -ENOMEM;
2170 
2171         bacpy(&entry->bdaddr, bdaddr);
2172         entry->bdaddr_type = type;
2173         entry->flags = flags;
2174 
2175         list_add(&entry->list, list);
2176 
2177         return 0;
2178 }
2179 
2180 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2181 {
2182         struct bdaddr_list *entry;
2183 
2184         if (!bacmp(bdaddr, BDADDR_ANY)) {
2185                 hci_bdaddr_list_clear(list);
2186                 return 0;
2187         }
2188 
2189         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2190         if (!entry)
2191                 return -ENOENT;
2192 
2193         list_del(&entry->list);
2194         kfree(entry);
2195 
2196         return 0;
2197 }
2198 
2199 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2200                                                         u8 type)
2201 {
2202         struct bdaddr_list_with_irk *entry;
2203 
2204         if (!bacmp(bdaddr, BDADDR_ANY)) {
2205                 hci_bdaddr_list_clear(list);
2206                 return 0;
2207         }
2208 
2209         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2210         if (!entry)
2211                 return -ENOENT;
2212 
2213         list_del(&entry->list);
2214         kfree(entry);
2215 
2216         return 0;
2217 }
2218 
2219 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2220                                    u8 type)
2221 {
2222         struct bdaddr_list_with_flags *entry;
2223 
2224         if (!bacmp(bdaddr, BDADDR_ANY)) {
2225                 hci_bdaddr_list_clear(list);
2226                 return 0;
2227         }
2228 
2229         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2230         if (!entry)
2231                 return -ENOENT;
2232 
2233         list_del(&entry->list);
2234         kfree(entry);
2235 
2236         return 0;
2237 }
2238 
2239 /* This function requires the caller holds hdev->lock */
2240 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2241                                                bdaddr_t *addr, u8 addr_type)
2242 {
2243         struct hci_conn_params *params;
2244 
2245         list_for_each_entry(params, &hdev->le_conn_params, list) {
2246                 if (bacmp(&params->addr, addr) == 0 &&
2247                     params->addr_type == addr_type) {
2248                         return params;
2249                 }
2250         }
2251 
2252         return NULL;
2253 }
2254 
2255 /* This function requires the caller holds hdev->lock */
2256 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2257                                                   bdaddr_t *addr, u8 addr_type)
2258 {
2259         struct hci_conn_params *param;
2260 
2261         list_for_each_entry(param, list, action) {
2262                 if (bacmp(&param->addr, addr) == 0 &&
2263                     param->addr_type == addr_type)
2264                         return param;
2265         }
2266 
2267         return NULL;
2268 }
2269 
2270 /* This function requires the caller holds hdev->lock */
2271 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2272                                             bdaddr_t *addr, u8 addr_type)
2273 {
2274         struct hci_conn_params *params;
2275 
2276         params = hci_conn_params_lookup(hdev, addr, addr_type);
2277         if (params)
2278                 return params;
2279 
2280         params = kzalloc(sizeof(*params), GFP_KERNEL);
2281         if (!params) {
2282                 bt_dev_err(hdev, "out of memory");
2283                 return NULL;
2284         }
2285 
2286         bacpy(&params->addr, addr);
2287         params->addr_type = addr_type;
2288 
2289         list_add(&params->list, &hdev->le_conn_params);
2290         INIT_LIST_HEAD(&params->action);
2291 
2292         params->conn_min_interval = hdev->le_conn_min_interval;
2293         params->conn_max_interval = hdev->le_conn_max_interval;
2294         params->conn_latency = hdev->le_conn_latency;
2295         params->supervision_timeout = hdev->le_supv_timeout;
2296         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2297 
2298         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2299 
2300         return params;
2301 }
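
     /* New entries inherit the controller-wide defaults initialized in
      * hci_alloc_dev_priv() and start with auto_connect disabled. A sketch of
      * the usual caller flow (values illustrative):
      *
      *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
      *	if (!params)
      *		return -ENOMEM;
      *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
      */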
2302 
2303 static void hci_conn_params_free(struct hci_conn_params *params)
2304 {
2305         if (params->conn) {
2306                 hci_conn_drop(params->conn);
2307                 hci_conn_put(params->conn);
2308         }
2309 
2310         list_del(&params->action);
2311         list_del(&params->list);
2312         kfree(params);
2313 }
2314 
2315 /* This function requires the caller holds hdev->lock */
2316 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2317 {
2318         struct hci_conn_params *params;
2319 
2320         params = hci_conn_params_lookup(hdev, addr, addr_type);
2321         if (!params)
2322                 return;
2323 
2324         hci_conn_params_free(params);
2325 
2326         hci_update_passive_scan(hdev);
2327 
2328         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2329 }
2330 
2331 /* This function requires the caller holds hdev->lock */
2332 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2333 {
2334         struct hci_conn_params *params, *tmp;
2335 
2336         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2337                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2338                         continue;
2339 
2340                 /* If trying to establish a one-time connection to a
2341                  * disabled device, leave the params but mark them as
2342                  * explicit connect only. */
2343                 if (params->explicit_connect) {
2344                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2345                         continue;
2346                 }
2347 
2348                 list_del(&params->list);
2349                 kfree(params);
2350         }
2351 
2352         BT_DBG("All LE disabled connection parameters were removed");
2353 }
2354 
2355 /* This function requires the caller holds hdev->lock */
2356 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2357 {
2358         struct hci_conn_params *params, *tmp;
2359 
2360         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2361                 hci_conn_params_free(params);
2362 
2363         BT_DBG("All LE connection parameters were removed");
2364 }
2365 
2366 /* Copy the Identity Address of the controller.
2367  *
2368  * If the controller has a public BD_ADDR, then by default use that one.
2369  * If this is a LE only controller without a public address, default to
2370  * the static random address.
2371  *
2372  * For debugging purposes it is possible to force controllers with a
2373  * public address to use the static random address instead.
2374  *
2375  * In case BR/EDR has been disabled on a dual-mode controller and
2376  * userspace has configured a static address, then that address
2377  * becomes the identity address instead of the public BR/EDR address.
2378  */
2379 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2380                                u8 *bdaddr_type)
2381 {
2382         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2383             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2384             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2385              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2386                 bacpy(bdaddr, &hdev->static_addr);
2387                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2388         } else {
2389                 bacpy(bdaddr, &hdev->bdaddr);
2390                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2391         }
2392 }
2393 
2394 static void hci_clear_wake_reason(struct hci_dev *hdev)
2395 {
2396         hci_dev_lock(hdev);
2397 
2398         hdev->wake_reason = 0;
2399         bacpy(&hdev->wake_addr, BDADDR_ANY);
2400         hdev->wake_addr_type = 0;
2401 
2402         hci_dev_unlock(hdev);
2403 }
2404 
2405 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2406                                 void *data)
2407 {
2408         struct hci_dev *hdev =
2409                 container_of(nb, struct hci_dev, suspend_notifier);
2410         int ret = 0;
2411 
2412         if (action == PM_SUSPEND_PREPARE)
2413                 ret = hci_suspend_dev(hdev);
2414         else if (action == PM_POST_SUSPEND)
2415                 ret = hci_resume_dev(hdev);
2416 
2417         if (ret)
2418                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2419                            action, ret);
2420 
2421         return NOTIFY_DONE;
2422 }
2423 
2424 /* Alloc HCI device */
2425 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2426 {
2427         struct hci_dev *hdev;
2428         unsigned int alloc_size;
2429 
2430         alloc_size = sizeof(*hdev);
2431         if (sizeof_priv) {
2432                 /* Fixme: May need ALIGN-ment? */
2433                 alloc_size += sizeof_priv;
2434         }
2435 
2436         hdev = kzalloc(alloc_size, GFP_KERNEL);
2437         if (!hdev)
2438                 return NULL;
2439 
2440         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2441         hdev->esco_type = (ESCO_HV1);
2442         hdev->link_mode = (HCI_LM_ACCEPT);
2443         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2444         hdev->io_capability = 0x03;     /* No Input No Output */
2445         hdev->manufacturer = 0xffff;    /* Default to internal use */
2446         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2447         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2448         hdev->adv_instance_cnt = 0;
2449         hdev->cur_adv_instance = 0x00;
2450         hdev->adv_instance_timeout = 0;
2451 
2452         hdev->advmon_allowlist_duration = 300;
2453         hdev->advmon_no_filter_duration = 500;
2454         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disabled */
2455 
2456         hdev->sniff_max_interval = 800;
2457         hdev->sniff_min_interval = 80;
2458 
2459         hdev->le_adv_channel_map = 0x07;
2460         hdev->le_adv_min_interval = 0x0800;
2461         hdev->le_adv_max_interval = 0x0800;
2462         hdev->le_scan_interval = 0x0060;
2463         hdev->le_scan_window = 0x0030;
2464         hdev->le_scan_int_suspend = 0x0400;
2465         hdev->le_scan_window_suspend = 0x0012;
2466         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2467         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2468         hdev->le_scan_int_adv_monitor = 0x0060;
2469         hdev->le_scan_window_adv_monitor = 0x0030;
2470         hdev->le_scan_int_connect = 0x0060;
2471         hdev->le_scan_window_connect = 0x0060;
2472         hdev->le_conn_min_interval = 0x0018;
2473         hdev->le_conn_max_interval = 0x0028;
2474         hdev->le_conn_latency = 0x0000;
2475         hdev->le_supv_timeout = 0x002a;
2476         hdev->le_def_tx_len = 0x001b;
2477         hdev->le_def_tx_time = 0x0148;
2478         hdev->le_max_tx_len = 0x001b;
2479         hdev->le_max_tx_time = 0x0148;
2480         hdev->le_max_rx_len = 0x001b;
2481         hdev->le_max_rx_time = 0x0148;
2482         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2483         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2484         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2485         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2486         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2487         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2488         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2489         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2490         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2491 
2492         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2493         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2494         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2495         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2496         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2497         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2498 
2499         /* default 1.28 sec page scan */
2500         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2501         hdev->def_page_scan_int = 0x0800;
2502         hdev->def_page_scan_window = 0x0012;
2503 
2504         mutex_init(&hdev->lock);
2505         mutex_init(&hdev->req_lock);
2506 
2507         INIT_LIST_HEAD(&hdev->mgmt_pending);
2508         INIT_LIST_HEAD(&hdev->reject_list);
2509         INIT_LIST_HEAD(&hdev->accept_list);
2510         INIT_LIST_HEAD(&hdev->uuids);
2511         INIT_LIST_HEAD(&hdev->link_keys);
2512         INIT_LIST_HEAD(&hdev->long_term_keys);
2513         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2514         INIT_LIST_HEAD(&hdev->remote_oob_data);
2515         INIT_LIST_HEAD(&hdev->le_accept_list);
2516         INIT_LIST_HEAD(&hdev->le_resolv_list);
2517         INIT_LIST_HEAD(&hdev->le_conn_params);
2518         INIT_LIST_HEAD(&hdev->pend_le_conns);
2519         INIT_LIST_HEAD(&hdev->pend_le_reports);
2520         INIT_LIST_HEAD(&hdev->conn_hash.list);
2521         INIT_LIST_HEAD(&hdev->adv_instances);
2522         INIT_LIST_HEAD(&hdev->blocked_keys);
2523         INIT_LIST_HEAD(&hdev->monitored_devices);
2524 
2525         INIT_LIST_HEAD(&hdev->local_codecs);
2526         INIT_WORK(&hdev->rx_work, hci_rx_work);
2527         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2528         INIT_WORK(&hdev->tx_work, hci_tx_work);
2529         INIT_WORK(&hdev->power_on, hci_power_on);
2530         INIT_WORK(&hdev->error_reset, hci_error_reset);
2531 
2532         hci_cmd_sync_init(hdev);
2533 
2534         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2535 
2536         skb_queue_head_init(&hdev->rx_q);
2537         skb_queue_head_init(&hdev->cmd_q);
2538         skb_queue_head_init(&hdev->raw_q);
2539 
2540         init_waitqueue_head(&hdev->req_wait_q);
2541 
2542         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2543         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2544 
2545         hci_request_setup(hdev);
2546 
2547         hci_init_sysfs(hdev);
2548         discovery_init(hdev);
2549 
2550         return hdev;
2551 }
2552 EXPORT_SYMBOL(hci_alloc_dev_priv);
2553 
2554 /* Free HCI device */
2555 void hci_free_dev(struct hci_dev *hdev)
2556 {
2557         /* Will be freed via the device release callback */
2558         put_device(&hdev->dev);
2559 }
2560 EXPORT_SYMBOL(hci_free_dev);
2561 
2562 /* Register HCI device */
2563 int hci_register_dev(struct hci_dev *hdev)
2564 {
2565         int id, error;
2566 
2567         if (!hdev->open || !hdev->close || !hdev->send)
2568                 return -EINVAL;
2569 
2570         /* Do not allow HCI_AMP devices to register at index 0,
2571          * so the index can be used as the AMP controller ID.
2572          */
2573         switch (hdev->dev_type) {
2574         case HCI_PRIMARY:
2575                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2576                 break;
2577         case HCI_AMP:
2578                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2579                 break;
2580         default:
2581                 return -EINVAL;
2582         }
2583 
2584         if (id < 0)
2585                 return id;
2586 
2587         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2588         hdev->id = id;
2589 
2590         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2591 
2592         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2593         if (!hdev->workqueue) {
2594                 error = -ENOMEM;
2595                 goto err;
2596         }
2597 
2598         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2599                                                       hdev->name);
2600         if (!hdev->req_workqueue) {
2601                 destroy_workqueue(hdev->workqueue);
2602                 error = -ENOMEM;
2603                 goto err;
2604         }
2605 
2606         if (!IS_ERR_OR_NULL(bt_debugfs))
2607                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2608 
2609         dev_set_name(&hdev->dev, "%s", hdev->name);
2610 
2611         error = device_add(&hdev->dev);
2612         if (error < 0)
2613                 goto err_wqueue;
2614 
2615         hci_leds_init(hdev);
2616 
2617         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2618                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2619                                     hdev);
2620         if (hdev->rfkill) {
2621                 if (rfkill_register(hdev->rfkill) < 0) {
2622                         rfkill_destroy(hdev->rfkill);
2623                         hdev->rfkill = NULL;
2624                 }
2625         }
2626 
2627         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2628                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2629 
2630         hci_dev_set_flag(hdev, HCI_SETUP);
2631         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2632 
2633         if (hdev->dev_type == HCI_PRIMARY) {
2634                 /* Assume BR/EDR support until proven otherwise (such as
2635          * through reading supported features during init).
2636                  */
2637                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2638         }
2639 
2640         write_lock(&hci_dev_list_lock);
2641         list_add(&hdev->list, &hci_dev_list);
2642         write_unlock(&hci_dev_list_lock);
2643 
2644         /* Devices that are marked for raw-only usage are unconfigured
2645          * and should not be included in normal operation.
2646          */
2647         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2648                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2649 
2650         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2651          * callback.
2652          */
2653         if (hdev->wakeup)
2654                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2655 
2656         hci_sock_dev_event(hdev, HCI_DEV_REG);
2657         hci_dev_hold(hdev);
2658 
2659         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2660                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2661                 error = register_pm_notifier(&hdev->suspend_notifier);
2662                 if (error)
2663                         goto err_wqueue;
2664         }
2665 
2666         queue_work(hdev->req_workqueue, &hdev->power_on);
2667 
2668         idr_init(&hdev->adv_monitors_idr);
2669         msft_register(hdev);
2670 
2671         return id;
2672 
2673 err_wqueue:
2674         debugfs_remove_recursive(hdev->debugfs);
2675         destroy_workqueue(hdev->workqueue);
2676         destroy_workqueue(hdev->req_workqueue);
2677 err:
2678         ida_simple_remove(&hci_index_ida, hdev->id);
2679 
2680         return error;
2681 }
2682 EXPORT_SYMBOL(hci_register_dev);
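
     /* A minimal driver-side sketch of the alloc/register sequence; the
      * my_open/my_close/my_send callbacks are hypothetical driver functions:
      *
      *	hdev = hci_alloc_dev();
      *	if (!hdev)
      *		return -ENOMEM;
      *
      *	hdev->bus = HCI_USB;
      *	hdev->open = my_open;
      *	hdev->close = my_close;
      *	hdev->send = my_send;
      *
      *	err = hci_register_dev(hdev);
      *	if (err < 0)
      *		hci_free_dev(hdev);
      */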
2683 
2684 /* Unregister HCI device */
2685 void hci_unregister_dev(struct hci_dev *hdev)
2686 {
2687         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2688 
2689         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2690 
2691         write_lock(&hci_dev_list_lock);
2692         list_del(&hdev->list);
2693         write_unlock(&hci_dev_list_lock);
2694 
2695         cancel_work_sync(&hdev->power_on);
2696 
2697         hci_cmd_sync_clear(hdev);
2698 
2699         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2700                 unregister_pm_notifier(&hdev->suspend_notifier);
2701 
2702         msft_unregister(hdev);
2703 
2704         hci_dev_do_close(hdev);
2705 
2706         if (!test_bit(HCI_INIT, &hdev->flags) &&
2707             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2708             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2709                 hci_dev_lock(hdev);
2710                 mgmt_index_removed(hdev);
2711                 hci_dev_unlock(hdev);
2712         }
2713 
2714         /* mgmt_index_removed should take care of emptying the
2715          * pending list */
2716         BUG_ON(!list_empty(&hdev->mgmt_pending));
2717 
2718         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2719 
2720         if (hdev->rfkill) {
2721                 rfkill_unregister(hdev->rfkill);
2722                 rfkill_destroy(hdev->rfkill);
2723         }
2724 
2725         device_del(&hdev->dev);
2726         /* Actual cleanup is deferred until hci_release_dev(). */
2727         hci_dev_put(hdev);
2728 }
2729 EXPORT_SYMBOL(hci_unregister_dev);
2730 
2731 /* Release HCI device */
2732 void hci_release_dev(struct hci_dev *hdev)
2733 {
2734         debugfs_remove_recursive(hdev->debugfs);
2735         kfree_const(hdev->hw_info);
2736         kfree_const(hdev->fw_info);
2737 
2738         destroy_workqueue(hdev->workqueue);
2739         destroy_workqueue(hdev->req_workqueue);
2740 
2741         hci_dev_lock(hdev);
2742         hci_bdaddr_list_clear(&hdev->reject_list);
2743         hci_bdaddr_list_clear(&hdev->accept_list);
2744         hci_uuids_clear(hdev);
2745         hci_link_keys_clear(hdev);
2746         hci_smp_ltks_clear(hdev);
2747         hci_smp_irks_clear(hdev);
2748         hci_remote_oob_data_clear(hdev);
2749         hci_adv_instances_clear(hdev);
2750         hci_adv_monitors_clear(hdev);
2751         hci_bdaddr_list_clear(&hdev->le_accept_list);
2752         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2753         hci_conn_params_clear_all(hdev);
2754         hci_discovery_filter_clear(hdev);
2755         hci_blocked_keys_clear(hdev);
2756         hci_dev_unlock(hdev);
2757 
2758         ida_simple_remove(&hci_index_ida, hdev->id);
2759         kfree_skb(hdev->sent_cmd);
2760         kfree(hdev);
2761 }
2762 EXPORT_SYMBOL(hci_release_dev);
2763 
2764 /* Suspend HCI device */
2765 int hci_suspend_dev(struct hci_dev *hdev)
2766 {
2767         int ret;
2768 
2769         bt_dev_dbg(hdev, "");
2770 
2771         /* Suspend should only act when the device is powered. */
2772         if (!hdev_is_powered(hdev) ||
2773             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2774                 return 0;
2775 
2776         /* If powering down, don't attempt to suspend */
2777         if (mgmt_powering_down(hdev))
2778                 return 0;
2779 
2780         hci_req_sync_lock(hdev);
2781         ret = hci_suspend_sync(hdev);
2782         hci_req_sync_unlock(hdev);
2783 
2784         hci_clear_wake_reason(hdev);
2785         mgmt_suspending(hdev, hdev->suspend_state);
2786 
2787         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2788         return ret;
2789 }
2790 EXPORT_SYMBOL(hci_suspend_dev);
2791 
2792 /* Resume HCI device */
2793 int hci_resume_dev(struct hci_dev *hdev)
2794 {
2795         int ret;
2796 
2797         bt_dev_dbg(hdev, "");
2798 
2799         /* Resume should only act when the device is powered. */
2800         if (!hdev_is_powered(hdev) ||
2801             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2802                 return 0;
2803 
2804         /* If powering down, don't attempt to resume */
2805         if (mgmt_powering_down(hdev))
2806                 return 0;
2807 
2808         hci_req_sync_lock(hdev);
2809         ret = hci_resume_sync(hdev);
2810         hci_req_sync_unlock(hdev);
2811 
2812         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2813                       hdev->wake_addr_type);
2814 
2815         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2816         return ret;
2817 }
2818 EXPORT_SYMBOL(hci_resume_dev);
2819 
2820 /* Reset HCI device */
2821 int hci_reset_dev(struct hci_dev *hdev)
2822 {
2823         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2824         struct sk_buff *skb;
2825 
2826         skb = bt_skb_alloc(3, GFP_ATOMIC);
2827         if (!skb)
2828                 return -ENOMEM;
2829 
2830         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2831         skb_put_data(skb, hw_err, 3);
2832 
2833         bt_dev_err(hdev, "Injecting HCI hardware error event");
2834 
2835         /* Send Hardware Error to upper stack */
2836         return hci_recv_frame(hdev, skb);
2837 }
2838 EXPORT_SYMBOL(hci_reset_dev);
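
     /* The injected frame above is a complete HCI Hardware Error event: one
      * byte of event code (HCI_EV_HARDWARE_ERROR, 0x10), one byte of parameter
      * length (0x01) and one byte of hardware code (0x00), hence the 3-byte
      * allocation.
      */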
2839 
2840 /* Receive frame from HCI drivers */
2841 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2842 {
2843         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2844                       && !test_bit(HCI_INIT, &hdev->flags))) {
2845                 kfree_skb(skb);
2846                 return -ENXIO;
2847         }
2848 
2849         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2850             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2851             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2852             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2853                 kfree_skb(skb);
2854                 return -EINVAL;
2855         }
2856 
2857         /* Incoming skb */
2858         bt_cb(skb)->incoming = 1;
2859 
2860         /* Time stamp */
2861         __net_timestamp(skb);
2862 
2863         skb_queue_tail(&hdev->rx_q, skb);
2864         queue_work(hdev->workqueue, &hdev->rx_work);
2865 
2866         return 0;
2867 }
2868 EXPORT_SYMBOL(hci_recv_frame);
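
     /* Driver-side sketch of handing a received packet to the core: the skb
      * must be tagged with its HCI packet type first, and hci_recv_frame()
      * consumes the skb even on error (illustrative only):
      *
      *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
      *	err = hci_recv_frame(hdev, skb);
      */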
2869 
2870 /* Receive diagnostic message from HCI drivers */
2871 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2872 {
2873         /* Mark as diagnostic packet */
2874         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2875 
2876         /* Time stamp */
2877         __net_timestamp(skb);
2878 
2879         skb_queue_tail(&hdev->rx_q, skb);
2880         queue_work(hdev->workqueue, &hdev->rx_work);
2881 
2882         return 0;
2883 }
2884 EXPORT_SYMBOL(hci_recv_diag);
2885 
2886 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2887 {
2888         va_list vargs;
2889 
2890         va_start(vargs, fmt);
2891         kfree_const(hdev->hw_info);
2892         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2893         va_end(vargs);
2894 }
2895 EXPORT_SYMBOL(hci_set_hw_info);
2896 
2897 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2898 {
2899         va_list vargs;
2900 
2901         va_start(vargs, fmt);
2902         kfree_const(hdev->fw_info);
2903         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2904         va_end(vargs);
2905 }
2906 EXPORT_SYMBOL(hci_set_fw_info);
2907 
2908 /* ---- Interface to upper protocols ---- */
2909 
2910 int hci_register_cb(struct hci_cb *cb)
2911 {
2912         BT_DBG("%p name %s", cb, cb->name);
2913 
2914         mutex_lock(&hci_cb_list_lock);
2915         list_add_tail(&cb->list, &hci_cb_list);
2916         mutex_unlock(&hci_cb_list_lock);
2917 
2918         return 0;
2919 }
2920 EXPORT_SYMBOL(hci_register_cb);
2921 
2922 int hci_unregister_cb(struct hci_cb *cb)
2923 {
2924         BT_DBG("%p name %s", cb, cb->name);
2925 
2926         mutex_lock(&hci_cb_list_lock);
2927         list_del(&cb->list);
2928         mutex_unlock(&hci_cb_list_lock);
2929 
2930         return 0;
2931 }
2932 EXPORT_SYMBOL(hci_unregister_cb);
2933 
2934 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2935 {
2936         int err;
2937 
2938         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2939                skb->len);
2940 
2941         /* Time stamp */
2942         __net_timestamp(skb);
2943 
2944         /* Send copy to monitor */
2945         hci_send_to_monitor(hdev, skb);
2946 
2947         if (atomic_read(&hdev->promisc)) {
2948                 /* Send copy to the sockets */
2949                 hci_send_to_sock(hdev, skb);
2950         }
2951 
2952         /* Get rid of skb owner, prior to sending to the driver. */
2953         skb_orphan(skb);
2954 
2955         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2956                 kfree_skb(skb);
2957                 return -EINVAL;
2958         }
2959 
2960         err = hdev->send(hdev, skb);
2961         if (err < 0) {
2962                 bt_dev_err(hdev, "sending frame failed (%d)", err);
2963                 kfree_skb(skb);
2964                 return err;
2965         }
2966 
2967         return 0;
2968 }
2969 
2970 /* Send HCI command */
2971 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2972                  const void *param)
2973 {
2974         struct sk_buff *skb;
2975 
2976         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2977 
2978         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2979         if (!skb) {
2980                 bt_dev_err(hdev, "no memory for command");
2981                 return -ENOMEM;
2982         }
2983 
2984         /* Stand-alone HCI commands must be flagged as
2985          * single-command requests.
2986          */
2987         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2988 
2989         skb_queue_tail(&hdev->cmd_q, skb);
2990         queue_work(hdev->workqueue, &hdev->cmd_work);
2991 
2992         return 0;
2993 }
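
     /* Sketch of queueing a standard command, here Read BD_ADDR with no
      * parameters; the result arrives later as a Command Complete event:
      *
      *	err = hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
      */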
2994 
2995 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2996                    const void *param)
2997 {
2998         struct sk_buff *skb;
2999 
3000         if (hci_opcode_ogf(opcode) != 0x3f) {
3001                 /* A controller receiving a command shall respond with either
3002                  * a Command Status Event or a Command Complete Event.
3003                  * Therefore, all standard HCI commands must be sent via the
3004                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3005                  * Some vendors do not comply with this rule for vendor-specific
3006                  * commands and do not return any event. We want to support
3007                  * unresponded commands for such cases only.
3008                  */
3009                 bt_dev_err(hdev, "unresponded command not supported");
3010                 return -EINVAL;
3011         }
3012 
3013         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3014         if (!skb) {
3015                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3016                            opcode);
3017                 return -ENOMEM;
3018         }
3019 
3020         hci_send_frame(hdev, skb);
3021 
3022         return 0;
3023 }
3024 EXPORT_SYMBOL(__hci_cmd_send);
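
     /* Sketch of an unresponded vendor command; the opcode and payload below
      * are made up for illustration (OGF 0x3f is the vendor-specific group):
      *
      *	static const u8 param[] = { 0x01 };
      *
      *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
      *			     sizeof(param), param);
      */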
3025 
3026 /* Get data from the previously sent command */
3027 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3028 {
3029         struct hci_command_hdr *hdr;
3030 
3031         if (!hdev->sent_cmd)
3032                 return NULL;
3033 
3034         hdr = (void *) hdev->sent_cmd->data;
3035 
3036         if (hdr->opcode != cpu_to_le16(opcode))
3037                 return NULL;
3038 
3039         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3040 
3041         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3042 }
3043 
3044 /* Send ACL data */
3045 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3046 {
3047         struct hci_acl_hdr *hdr;
3048         int len = skb->len;
3049 
3050         skb_push(skb, HCI_ACL_HDR_SIZE);
3051         skb_reset_transport_header(skb);
3052         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3053         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3054         hdr->dlen   = cpu_to_le16(len);
3055 }
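
     /* hci_handle_pack() combines the 12-bit connection handle with the 4-bit
      * packet boundary/broadcast flags in the top bits, roughly equivalent to:
      *
      *	handle_field = (handle & 0x0fff) | (flags << 12);
      */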
3056 
3057 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3058                           struct sk_buff *skb, __u16 flags)
3059 {
3060         struct hci_conn *conn = chan->conn;
3061         struct hci_dev *hdev = conn->hdev;
3062         struct sk_buff *list;
3063 
3064         skb->len = skb_headlen(skb);
3065         skb->data_len = 0;
3066 
3067         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3068 
3069         switch (hdev->dev_type) {
3070         case HCI_PRIMARY:
3071                 hci_add_acl_hdr(skb, conn->handle, flags);
3072                 break;
3073         case HCI_AMP:
3074                 hci_add_acl_hdr(skb, chan->handle, flags);
3075                 break;
3076         default:
3077                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3078                 return;
3079         }
3080 
3081         list = skb_shinfo(skb)->frag_list;
3082         if (!list) {
3083                 /* Non fragmented */
3084                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3085 
3086                 skb_queue_tail(queue, skb);
3087         } else {
3088                 /* Fragmented */
3089                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3090 
3091                 skb_shinfo(skb)->frag_list = NULL;
3092 
3093                 /* Queue all fragments atomically. We need to use spin_lock_bh
3094                  * here because of 6LoWPAN links, as there this function is
3095                  * called from softirq and using normal spin lock could cause
3096                  * deadlocks.
3097                  */
3098                 spin_lock_bh(&queue->lock);
3099 
3100                 __skb_queue_tail(queue, skb);
3101 
3102                 flags &= ~ACL_START;
3103                 flags |= ACL_CONT;
3104                 do {
3105                         skb = list; list = list->next;
3106 
3107                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3108                         hci_add_acl_hdr(skb, conn->handle, flags);
3109 
3110                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3111 
3112                         __skb_queue_tail(queue, skb);
3113                 } while (list);
3114 
3115                 spin_unlock_bh(&queue->lock);
3116         }
3117 }
3118 
3119 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3120 {
3121         struct hci_dev *hdev = chan->conn->hdev;
3122 
3123         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3124 
3125         hci_queue_acl(chan, &chan->data_q, skb, flags);
3126 
3127         queue_work(hdev->workqueue, &hdev->tx_work);
3128 }
3129 
3130 /* Send SCO data */
3131 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3132 {
3133         struct hci_dev *hdev = conn->hdev;
3134         struct hci_sco_hdr hdr;
3135 
3136         BT_DBG("%s len %d", hdev->name, skb->len);
3137 
3138         hdr.handle = cpu_to_le16(conn->handle);
3139         hdr.dlen   = skb->len;
3140 
3141         skb_push(skb, HCI_SCO_HDR_SIZE);
3142         skb_reset_transport_header(skb);
3143         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3144 
3145         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3146 
3147         skb_queue_tail(&conn->data_q, skb);
3148         queue_work(hdev->workqueue, &hdev->tx_work);
3149 }
3150 
3151 /* ---- HCI TX task (outgoing data) ---- */
3152 
3153 /* HCI Connection scheduler */
3154 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3155                                      int *quote)
3156 {
3157         struct hci_conn_hash *h = &hdev->conn_hash;
3158         struct hci_conn *conn = NULL, *c;
3159         unsigned int num = 0, min = ~0;
3160 
3161         /* We don't have to lock device here. Connections are always
3162          * added and removed with TX task disabled. */
3163 
3164         rcu_read_lock();
3165 
3166         list_for_each_entry_rcu(c, &h->list, list) {
3167                 if (c->type != type || skb_queue_empty(&c->data_q))
3168                         continue;
3169 
3170                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3171                         continue;
3172 
3173                 num++;
3174 
3175                 if (c->sent < min) {
3176                         min  = c->sent;
3177                         conn = c;
3178                 }
3179 
3180                 if (hci_conn_num(hdev, type) == num)
3181                         break;
3182         }
3183 
3184         rcu_read_unlock();
3185 
3186         if (conn) {
3187                 int cnt, q;
3188 
3189                 switch (conn->type) {
3190                 case ACL_LINK:
3191                         cnt = hdev->acl_cnt;
3192                         break;
3193                 case SCO_LINK:
3194                 case ESCO_LINK:
3195                         cnt = hdev->sco_cnt;
3196                         break;
3197                 case LE_LINK:
3198                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3199                         break;
3200                 default:
3201                         cnt = 0;
3202                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3203                 }
3204 
3205                 q = cnt / num;
3206                 *quote = q ? q : 1;
3207         } else
3208                 *quote = 0;
3209 
3210         BT_DBG("conn %p quote %d", conn, *quote);
3211         return conn;
3212 }
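
     /* The quote computed above is a simple fair share: with cnt controller
      * buffers free and num connections holding queued data, the least-served
      * connection may send cnt / num packets this round (minimum 1). For
      * example, acl_cnt == 8 with three busy ACL links yields a quote of 2.
      */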
3213 
3214 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3215 {
3216         struct hci_conn_hash *h = &hdev->conn_hash;
3217         struct hci_conn *c;
3218 
3219         bt_dev_err(hdev, "link tx timeout");
3220 
3221         rcu_read_lock();
3222 
3223         /* Kill stalled connections */
3224         list_for_each_entry_rcu(c, &h->list, list) {
3225                 if (c->type == type && c->sent) {
3226                         bt_dev_err(hdev, "killing stalled connection %pMR",
3227                                    &c->dst);
3228                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3229                 }
3230         }
3231 
3232         rcu_read_unlock();
3233 }
3234 
3235 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3236                                       int *quote)
3237 {
3238         struct hci_conn_hash *h = &hdev->conn_hash;
3239         struct hci_chan *chan = NULL;
3240         unsigned int num = 0, min = ~0, cur_prio = 0;
3241         struct hci_conn *conn;
3242         int cnt, q, conn_num = 0;
3243 
3244         BT_DBG("%s", hdev->name);
3245 
3246         rcu_read_lock();
3247 
3248         list_for_each_entry_rcu(conn, &h->list, list) {
3249                 struct hci_chan *tmp;
3250 
3251                 if (conn->type != type)
3252                         continue;
3253 
3254                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3255                         continue;
3256 
3257                 conn_num++;
3258 
3259                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3260                         struct sk_buff *skb;
3261 
3262                         if (skb_queue_empty(&tmp->data_q))
3263                                 continue;
3264 
3265                         skb = skb_peek(&tmp->data_q);
3266                         if (skb->priority < cur_prio)
3267                                 continue;
3268 
3269                         if (skb->priority > cur_prio) {
3270                                 num = 0;
3271                                 min = ~0;
3272                                 cur_prio = skb->priority;
3273                         }
3274 
3275                         num++;
3276 
3277                         if (conn->sent < min) {
3278                                 min  = conn->sent;
3279                                 chan = tmp;
3280                         }
3281                 }
3282 
3283                 if (hci_conn_num(hdev, type) == conn_num)
3284                         break;
3285         }
3286 
3287         rcu_read_unlock();
3288 
3289         if (!chan)
3290                 return NULL;
3291 
3292         switch (chan->conn->type) {
3293         case ACL_LINK:
3294                 cnt = hdev->acl_cnt;
3295                 break;
3296         case AMP_LINK:
3297                 cnt = hdev->block_cnt;
3298                 break;
3299         case SCO_LINK:
3300         case ESCO_LINK:
3301                 cnt = hdev->sco_cnt;
3302                 break;
3303         case LE_LINK:
3304                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3305                 break;
3306         default:
3307                 cnt = 0;
3308                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3309         }
3310 
3311         q = cnt / num;
3312         *quote = q ? q : 1;
3313         BT_DBG("chan %p quote %d", chan, *quote);
3314         return chan;
3315 }
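
/* Channel selection is two-level: only channels whose head skb holds
 * the highest priority seen so far compete (num and min are reset
 * whenever a larger cur_prio appears), and among those the channel on
 * the least busy connection (smallest ->sent) wins. The quote is then
 * cnt / num, with num counting only the channels at the winning
 * priority, not every channel with queued data.
 */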
3316 
3317 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3318 {
3319         struct hci_conn_hash *h = &hdev->conn_hash;
3320         struct hci_conn *conn;
3321         int num = 0;
3322 
3323         BT_DBG("%s", hdev->name);
3324 
3325         rcu_read_lock();
3326 
3327         list_for_each_entry_rcu(conn, &h->list, list) {
3328                 struct hci_chan *chan;
3329 
3330                 if (conn->type != type)
3331                         continue;
3332 
3333                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3334                         continue;
3335 
3336                 num++;
3337 
3338                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3339                         struct sk_buff *skb;
3340 
3341                         if (chan->sent) {
3342                                 chan->sent = 0;
3343                                 continue;
3344                         }
3345 
3346                         if (skb_queue_empty(&chan->data_q))
3347                                 continue;
3348 
3349                         skb = skb_peek(&chan->data_q);
3350                         if (skb->priority >= HCI_PRIO_MAX - 1)
3351                                 continue;
3352 
3353                         skb->priority = HCI_PRIO_MAX - 1;
3354 
3355                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3356                                skb->priority);
3357                 }
3358 
3359                 if (hci_conn_num(hdev, type) == num)
3360                         break;
3361         }
3362 
3363         rcu_read_unlock();
3364 
3365 }
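
/* This is the anti-starvation half of the scheduler: after a TX
 * round, a channel that transmitted merely has its per-round counter
 * reset, while a channel that sent nothing gets the skb at its queue
 * head promoted to HCI_PRIO_MAX - 1 so it outbids the previous
 * winners in the next hci_chan_sent() pass.
 */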
3366 
3367 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3368 {
3369         /* Calculate count of blocks used by this packet */
3370         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3371 }
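
/* Worked example (hypothetical block size): with hdev->block_len = 64
 * and an skb of HCI_ACL_HDR_SIZE + 300 bytes, the packet consumes
 * DIV_ROUND_UP(300, 64) = 5 blocks of the controller's block-based
 * buffer pool.
 */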
3372 
3373 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3374 {
3375         unsigned long last_tx;
3376 
3377         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3378                 return;
3379 
3380         switch (type) {
3381         case LE_LINK:
3382                 last_tx = hdev->le_last_tx;
3383                 break;
3384         default:
3385                 last_tx = hdev->acl_last_tx;
3386                 break;
3387         }
3388 
3389         /* The TX timeout must be longer than the maximum link
3390          * supervision timeout (40.9 seconds).
3391          */
3392         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3393                 hci_link_tx_to(hdev, type);
3394 }
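
/* The 40.9 seconds quoted above is the largest encodable link
 * supervision timeout: 0xFFFF slots * 0.625 ms/slot ~= 40.96 s.
 * HCI_ACL_TX_TIMEOUT (45 seconds at the time of writing) therefore
 * only fires once the baseband must already have declared the link
 * dead.
 */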
3395 
3396 /* Schedule SCO */
3397 static void hci_sched_sco(struct hci_dev *hdev)
3398 {
3399         struct hci_conn *conn;
3400         struct sk_buff *skb;
3401         int quote;
3402 
3403         BT_DBG("%s", hdev->name);
3404 
3405         if (!hci_conn_num(hdev, SCO_LINK))
3406                 return;
3407 
3408         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3409                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3410                         BT_DBG("skb %p len %d", skb, skb->len);
3411                         hci_send_frame(hdev, skb);
3412 
3413                         conn->sent++;
3414                         if (conn->sent == ~0)
3415                                 conn->sent = 0;
3416                 }
3417         }
3418 }
3419 
3420 static void hci_sched_esco(struct hci_dev *hdev)
3421 {
3422         struct hci_conn *conn;
3423         struct sk_buff *skb;
3424         int quote;
3425 
3426         BT_DBG("%s", hdev->name);
3427 
3428         if (!hci_conn_num(hdev, ESCO_LINK))
3429                 return;
3430 
3431         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3432                                                      &quote))) {
3433                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3434                         BT_DBG("skb %p len %d", skb, skb->len);
3435                         hci_send_frame(hdev, skb);
3436 
3437                         conn->sent++;
3438                         if (conn->sent == ~0)
3439                                 conn->sent = 0;
3440                 }
3441         }
3442 }
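
/* hci_sched_sco() and hci_sched_esco() are identical except for the
 * link type passed to hci_low_sent(); both gate on the shared
 * hdev->sco_cnt budget. conn->sent counts frames in flight, and the
 * "== ~0" test simply resets the counter before it can wrap.
 */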
3443 
3444 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3445 {
3446         unsigned int cnt = hdev->acl_cnt;
3447         struct hci_chan *chan;
3448         struct sk_buff *skb;
3449         int quote;
3450 
3451         __check_timeout(hdev, cnt, ACL_LINK);
3452 
3453         while (hdev->acl_cnt &&
3454                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3455                 u32 priority = (skb_peek(&chan->data_q))->priority;
3456                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3457                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3458                                skb->len, skb->priority);
3459 
3460                         /* Stop if priority has changed */
3461                         if (skb->priority < priority)
3462                                 break;
3463 
3464                         skb = skb_dequeue(&chan->data_q);
3465 
3466                         hci_conn_enter_active_mode(chan->conn,
3467                                                    bt_cb(skb)->force_active);
3468 
3469                         hci_send_frame(hdev, skb);
3470                         hdev->acl_last_tx = jiffies;
3471 
3472                         hdev->acl_cnt--;
3473                         chan->sent++;
3474                         chan->conn->sent++;
3475 
3476                         /* Send pending SCO packets right away */
3477                         hci_sched_sco(hdev);
3478                         hci_sched_esco(hdev);
3479                 }
3480         }
3481 
3482         if (cnt != hdev->acl_cnt)
3483                 hci_prio_recalculate(hdev, ACL_LINK);
3484 }
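
/* Note the peek-then-dequeue pattern above: the batch priority is
 * sampled once per channel and the inner loop stops as soon as a
 * lower-priority skb reaches the queue head, so a single quote never
 * mixes priorities. Re-running hci_sched_sco()/hci_sched_esco()
 * after every ACL frame keeps isochronous audio from being starved
 * behind a long ACL burst.
 */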
3485 
3486 static void hci_sched_acl_blk(struct hci_dev *hdev)
3487 {
3488         unsigned int cnt = hdev->block_cnt;
3489         struct hci_chan *chan;
3490         struct sk_buff *skb;
3491         int quote;
3492         u8 type;
3493 
3494         BT_DBG("%s", hdev->name);
3495 
3496         if (hdev->dev_type == HCI_AMP)
3497                 type = AMP_LINK;
3498         else
3499                 type = ACL_LINK;
3500 
3501         __check_timeout(hdev, cnt, type);
3502 
3503         while (hdev->block_cnt > 0 &&
3504                (chan = hci_chan_sent(hdev, type, &quote))) {
3505                 u32 priority = (skb_peek(&chan->data_q))->priority;
3506                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3507                         int blocks;
3508 
3509                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3510                                skb->len, skb->priority);
3511 
3512                         /* Stop if priority has changed */
3513                         if (skb->priority < priority)
3514                                 break;
3515 
3516                         skb = skb_dequeue(&chan->data_q);
3517 
3518                         blocks = __get_blocks(hdev, skb);
3519                         if (blocks > hdev->block_cnt)
3520                                 return;
3521 
3522                         hci_conn_enter_active_mode(chan->conn,
3523                                                    bt_cb(skb)->force_active);
3524 
3525                         hci_send_frame(hdev, skb);
3526                         hdev->acl_last_tx = jiffies;
3527 
3528                         hdev->block_cnt -= blocks;
3529                         quote -= blocks;
3530 
3531                         chan->sent += blocks;
3532                         chan->conn->sent += blocks;
3533                 }
3534         }
3535 
3536         if (cnt != hdev->block_cnt)
3537                 hci_prio_recalculate(hdev, type);
3538 }
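
/* In block-based mode a single packet may consume several credits at
 * once: both hdev->block_cnt and the per-channel quote are reduced by
 * the packet's block count rather than by one per frame.
 */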
3539 
3540 static void hci_sched_acl(struct hci_dev *hdev)
3541 {
3542         BT_DBG("%s", hdev->name);
3543 
3544         /* Nothing to do if a BR/EDR controller has no ACL links */
3545         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3546                 return;
3547 
3548         /* Nothing to do if an AMP controller has no AMP links */
3549         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3550                 return;
3551 
3552         switch (hdev->flow_ctl_mode) {
3553         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3554                 hci_sched_acl_pkt(hdev);
3555                 break;
3556 
3557         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3558                 hci_sched_acl_blk(hdev);
3559                 break;
3560         }
3561 }
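
/* The dispatch above follows the controller's flow control mode:
 * packet-based controllers account whole ACL packets via Number Of
 * Completed Packets events (hdev->acl_cnt), while block-based AMP
 * controllers account fixed-size data blocks (hdev->block_cnt)
 * reported through Number Of Completed Data Blocks events.
 */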
3562 
3563 static void hci_sched_le(struct hci_dev *hdev)
3564 {
3565         struct hci_chan *chan;
3566         struct sk_buff *skb;
3567         int quote, cnt, tmp;
3568 
3569         BT_DBG("%s", hdev->name);
3570 
3571         if (!hci_conn_num(hdev, LE_LINK))
3572                 return;
3573 
3574         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3575 
3576         __check_timeout(hdev, cnt, LE_LINK);
3577 
3578         tmp = cnt;
3579         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3580                 u32 priority = (skb_peek(&chan->data_q))->priority;
3581                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3582                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3583                                skb->len, skb->priority);
3584 
3585                         /* Stop if priority has changed */
3586                         if (skb->priority < priority)
3587                                 break;
3588 
3589                         skb = skb_dequeue(&chan->data_q);
3590 
3591                         hci_send_frame(hdev, skb);
3592                         hdev->le_last_tx = jiffies;
3593 
3594                         cnt--;
3595                         chan->sent++;
3596                         chan->conn->sent++;
3597 
3598                         /* Send pending SCO packets right away */
3599                         hci_sched_sco(hdev);
3600                         hci_sched_esco(hdev);
3601                 }
3602         }
3603 
3604         if (hdev->le_pkts)
3605                 hdev->le_cnt = cnt;
3606         else
3607                 hdev->acl_cnt = cnt;
3608 
3609         if (cnt != tmp)
3610                 hci_prio_recalculate(hdev, LE_LINK);
3611 }
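
/* Controllers that advertise no dedicated LE buffers (le_pkts == 0)
 * share the BR/EDR ACL pool, which is why cnt is seeded from acl_cnt
 * above and written back to either le_cnt or acl_cnt once the pass
 * finishes.
 */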
3612 
3613 static void hci_tx_work(struct work_struct *work)
3614 {
3615         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3616         struct sk_buff *skb;
3617 
3618         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3619                hdev->sco_cnt, hdev->le_cnt);
3620 
3621         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3622                 /* Schedule queues and send stuff to HCI driver */
3623                 hci_sched_sco(hdev);
3624                 hci_sched_esco(hdev);
3625                 hci_sched_acl(hdev);
3626                 hci_sched_le(hdev);
3627         }
3628 
3629         /* Send next queued raw (unknown type) packet */
3630         while ((skb = skb_dequeue(&hdev->raw_q)))
3631                 hci_send_frame(hdev, skb);
3632 }
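
/* In HCI_USER_CHANNEL mode the schedulers are skipped entirely:
 * userspace owns the device, and its frames arrive pre-formed on
 * hdev->raw_q, which is drained unconditionally at the end of every
 * pass.
 */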
3633 
3634 /* ----- HCI RX task (incoming data processing) ----- */
3635 
3636 /* ACL data packet */
3637 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3638 {
3639         struct hci_acl_hdr *hdr = (void *) skb->data;
3640         struct hci_conn *conn;
3641         __u16 handle, flags;
3642 
3643         skb_pull(skb, HCI_ACL_HDR_SIZE);
3644 
3645         handle = __le16_to_cpu(hdr->handle);
3646         flags  = hci_flags(handle);
3647         handle = hci_handle(handle);
3648 
3649         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3650                handle, flags);
3651 
3652         hdev->stat.acl_rx++;
3653 
3654         hci_dev_lock(hdev);
3655         conn = hci_conn_hash_lookup_handle(hdev, handle);
3656         hci_dev_unlock(hdev);
3657 
3658         if (conn) {
3659                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3660 
3661                 /* Send to upper protocol */
3662                 l2cap_recv_acldata(conn, skb, flags);
3663                 return;
3664         } else {
3665                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3666                            handle);
3667         }
3668 
3669         kfree_skb(skb);
3670 }
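
/* The 16-bit handle field on the wire packs a 12-bit connection
 * handle with 2 packet-boundary and 2 broadcast flag bits;
 * hci_handle() masks with 0x0fff and hci_flags() shifts down by 12.
 * For example, a raw value of 0x2007 is handle 0x007 with flags 0x2
 * (ACL_START, the first fragment of an auto-flushable L2CAP PDU).
 */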
3671 
3672 /* SCO data packet */
3673 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3674 {
3675         struct hci_sco_hdr *hdr = (void *) skb->data;
3676         struct hci_conn *conn;
3677         __u16 handle, flags;
3678 
3679         skb_pull(skb, HCI_SCO_HDR_SIZE);
3680 
3681         handle = __le16_to_cpu(hdr->handle);
3682         flags  = hci_flags(handle);
3683         handle = hci_handle(handle);
3684 
3685         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3686                handle, flags);
3687 
3688         hdev->stat.sco_rx++;
3689 
3690         hci_dev_lock(hdev);
3691         conn = hci_conn_hash_lookup_handle(hdev, handle);
3692         hci_dev_unlock(hdev);
3693 
3694         if (conn) {
3695                 /* Send to upper protocol */
3696                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3697                 sco_recv_scodata(conn, skb);
3698                 return;
3699         } else {
3700                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3701                                        handle);
3702         }
3703 
3704         kfree_skb(skb);
3705 }
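
/* For SCO the two low flag bits carry the Packet_Status_Flag
 * (0x0 correctly received, 0x1 possibly invalid, 0x2 no data
 * received, 0x3 partially lost); it is stashed in the skb control
 * block so sco.c can report it to userspace via the BT_PKT_STATUS
 * socket option.
 */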
3706 
3707 static bool hci_req_is_complete(struct hci_dev *hdev)
3708 {
3709         struct sk_buff *skb;
3710 
3711         skb = skb_peek(&hdev->cmd_q);
3712         if (!skb)
3713                 return true;
3714 
3715         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3716 }
3717 
3718 static void hci_resend_last(struct hci_dev *hdev)
3719 {
3720         struct hci_command_hdr *sent;
3721         struct sk_buff *skb;
3722         u16 opcode;
3723 
3724         if (!hdev->sent_cmd)
3725                 return;
3726 
3727         sent = (void *) hdev->sent_cmd->data;
3728         opcode = __le16_to_cpu(sent->opcode);
3729         if (opcode == HCI_OP_RESET)
3730                 return;
3731 
3732         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3733         if (!skb)
3734                 return;
3735 
3736         skb_queue_head(&hdev->cmd_q, skb);
3737         queue_work(hdev->workqueue, &hdev->cmd_work);
3738 }
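
/* skb_clone() leaves hdev->sent_cmd untouched and re-queues a copy
 * at the head of cmd_q, so the spontaneous reset handled in
 * hci_req_cmd_complete() below simply causes the lost command to be
 * transmitted again. HCI_OP_RESET itself is never resent, which
 * avoids a reset loop.
 */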
3739 
3740 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3741                           hci_req_complete_t *req_complete,
3742                           hci_req_complete_skb_t *req_complete_skb)
3743 {
3744         struct sk_buff *skb;
3745         unsigned long flags;
3746 
3747         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3748 
3749         /* If the completed command doesn't match the last one that was
3750          * sent, we need to do special handling of it.
3751          */
3752         if (!hci_sent_cmd_data(hdev, opcode)) {
3753                 /* Some CSR based controllers generate a spontaneous
3754                  * reset complete event during init and any pending
3755                  * command will never be completed. In such a case we
3756                  * need to resend whatever was the last sent
3757                  * command.
3758                  */
3759                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3760                         hci_resend_last(hdev);
3761 
3762                 return;
3763         }
3764 
3765         /* If we reach this point this event matches the last command sent */
3766         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3767 
3768         /* If the command succeeded and there are still more commands in
3769          * this request, the request is not yet complete.
3770          */
3771         if (!status && !hci_req_is_complete(hdev))
3772                 return;
3773 
3774         /* If this was the last command in a request, the complete
3775          * callback is found in hdev->sent_cmd instead of the
3776          * command queue (hdev->cmd_q).
3777          */
3778         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3779                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3780                 return;
3781         }
3782 
3783         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3784                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3785                 return;
3786         }
3787 
3788         /* Remove all pending commands belonging to this request */
3789         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3790         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3791                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3792                         __skb_queue_head(&hdev->cmd_q, skb);
3793                         break;
3794                 }
3795 
3796                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3797                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3798                 else
3799                         *req_complete = bt_cb(skb)->hci.req_complete;
3800                 kfree_skb(skb);
3801         }
3802         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3803 }
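
/* The flush loop above matters mainly on error: when a command in
 * the middle of a request fails, the remainder of that request
 * (everything queued before the next HCI_REQ_START marker) is
 * dropped from cmd_q, and the request-wide completion callback is
 * recovered from whichever discarded skb carried it.
 */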
3804 
3805 static void hci_rx_work(struct work_struct *work)
3806 {
3807         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3808         struct sk_buff *skb;
3809 
3810         BT_DBG("%s", hdev->name);
3811 
3812         while ((skb = skb_dequeue(&hdev->rx_q))) {
3813                 /* Send copy to monitor */
3814                 hci_send_to_monitor(hdev, skb);
3815 
3816                 if (atomic_read(&hdev->promisc)) {
3817                         /* Send copy to the sockets */
3818                         hci_send_to_sock(hdev, skb);
3819                 }
3820 
3821                 /* If the device has been opened in HCI_USER_CHANNEL,
3822                  * userspace has exclusive access to the device.
3823                  * While the device is in HCI_INIT, we still need to
3824                  * process incoming packets so that the driver can
3825                  * complete its setup().
3826                  */
3827                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3828                     !test_bit(HCI_INIT, &hdev->flags)) {
3829                         kfree_skb(skb);
3830                         continue;
3831                 }
3832 
3833                 if (test_bit(HCI_INIT, &hdev->flags)) {
3834                         /* Don't process data packets in this state. */
3835                         switch (hci_skb_pkt_type(skb)) {
3836                         case HCI_ACLDATA_PKT:
3837                         case HCI_SCODATA_PKT:
3838                         case HCI_ISODATA_PKT:
3839                                 kfree_skb(skb);
3840                                 continue;
3841                         }
3842                 }
3843 
3844                 /* Process frame */
3845                 switch (hci_skb_pkt_type(skb)) {
3846                 case HCI_EVENT_PKT:
3847                         BT_DBG("%s Event packet", hdev->name);
3848                         hci_event_packet(hdev, skb);
3849                         break;
3850 
3851                 case HCI_ACLDATA_PKT:
3852                         BT_DBG("%s ACL data packet", hdev->name);
3853                         hci_acldata_packet(hdev, skb);
3854                         break;
3855 
3856                 case HCI_SCODATA_PKT:
3857                         BT_DBG("%s SCO data packet", hdev->name);
3858                         hci_scodata_packet(hdev, skb);
3859                         break;
3860 
3861                 default:
3862                         kfree_skb(skb);
3863                         break;
3864                 }
3865         }
3866 }
3867 
3868 static void hci_cmd_work(struct work_struct *work)
3869 {
3870         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3871         struct sk_buff *skb;
3872 
3873         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3874                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3875 
3876         /* Send queued commands */
3877         if (atomic_read(&hdev->cmd_cnt)) {
3878                 skb = skb_dequeue(&hdev->cmd_q);
3879                 if (!skb)
3880                         return;
3881 
3882                 kfree_skb(hdev->sent_cmd);
3883 
3884                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3885                 if (hdev->sent_cmd) {
3886                         int res;
3887                         if (hci_req_status_pend(hdev))
3888                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3889                         atomic_dec(&hdev->cmd_cnt);
3890 
3891                         res = hci_send_frame(hdev, skb);
3892                         if (res < 0)
3893                                 __hci_cmd_sync_cancel(hdev, -res);
3894 
3895                         rcu_read_lock();
3896                         if (test_bit(HCI_RESET, &hdev->flags) ||
3897                             hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3898                                 cancel_delayed_work(&hdev->cmd_timer);
3899                         else
3900                                 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
3901                                                    HCI_CMD_TIMEOUT);
3902                         rcu_read_unlock();
3903                 } else {
3904                         skb_queue_head(&hdev->cmd_q, skb);
3905                         queue_work(hdev->workqueue, &hdev->cmd_work);
3906                 }
3907         }
3908 }
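
/* atomic_read(&hdev->cmd_cnt) acts as the command-credit gate: it is
 * decremented for each command sent and replenished when the
 * controller answers with Command Complete or Command Status. The
 * clone kept in hdev->sent_cmd is what hci_req_cmd_complete() later
 * matches opcodes against, and cmd_timer catches controllers that
 * never answer (HCI_CMD_TIMEOUT), except while a reset or an
 * intentional queue drain is in progress.
 */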
3909 
