
TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c


/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

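/* A synchronous request starts out as HCI_REQ_PEND and is moved to
 * HCI_REQ_DONE (with req_result holding the HCI status) or
 * HCI_REQ_CANCELED by the completion path, which then wakes up any
 * waiter sleeping on req_wait_q.
 */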
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

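        /* The first parameter byte of the returned Command Complete
         * event is the HCI status code; bt_to_errno() maps it to a
         * positive errno, which is negated here.
         */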
        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
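/* This attribute is exposed via debugfs, typically as
 * /sys/kernel/debug/bluetooth/hci<N>/dut_mode: writing "Y" sends
 * HCI_OP_ENABLE_DUT_MODE and writing "N" resets the controller.
 */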

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct link_key *key;

        rcu_read_lock();
        list_for_each_entry_rcu(key, &hdev->link_keys, list)
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        rcu_read_unlock();

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
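/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a simple
 * debugfs attribute from a get/set pair and a printf format; a NULL
 * setter, as used here, makes the attribute read-only.
 */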

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_lesc_support_fops = {
        .open           = simple_open,
        .read           = force_lesc_support_read,
        .write          = force_lesc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

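/* The idle timeout is in milliseconds; zero disables idle tracking,
 * otherwise values from 500 ms up to one hour are accepted.
 */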
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

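/* Sniff intervals are expressed in baseband slots of 0.625 ms and must
 * be even, with the minimum never exceeding the maximum.
 */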
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

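/* LE connection intervals are in units of 1.25 ms; the specification
 * allows 0x0006 (7.5 ms) through 0x0c80 (4 s), and the minimum must
 * not exceed the maximum.
 */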
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

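/* The LE supervision timeout is in units of 10 ms: 0x000a (100 ms)
 * through 0x0c80 (32 s).
 */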
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

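/* The advertising channel map is a three-bit mask selecting channels
 * 37, 38 and 39; at least one channel must be enabled.
 */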
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

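/* Advertising intervals are in units of 0.625 ms: 0x0020 (20 ms)
 * through 0x4000 (10.24 s), with min <= max.
 */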
static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

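        /* Queue ourselves on the wait queue and mark the task
         * interruptible before running the request, so a completion
         * that fires immediately cannot be missed.
         */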
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
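/* Callers of __hci_cmd_sync() and __hci_cmd_sync_ev() are expected to
 * serialize against each other via hdev->req_lock; dut_mode_write()
 * above is one example of the pattern.
 */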

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
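        /* 0x7d00 = 32000 baseband slots * 0.625 ms = 20 seconds */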
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

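/* Pick the best inquiry mode the controller supports: 0x02 = inquiry
 * result with extended inquiry result, 0x01 = inquiry result with
 * RSSI, 0x00 = standard. The manufacturer/revision checks below
 * whitelist known controllers that handle RSSI results without
 * advertising the feature.
 */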
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
1480         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1481                 return;
1482 
1483         if (lmp_bredr_capable(hdev)) {
1484                 events[4] |= 0x01; /* Flow Specification Complete */
1485                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1486                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1487                 events[5] |= 0x08; /* Synchronous Connection Complete */
1488                 events[5] |= 0x10; /* Synchronous Connection Changed */
1489         } else {
1490                 /* Use a different default for LE-only devices */
1491                 memset(events, 0, sizeof(events));
1492                 events[0] |= 0x10; /* Disconnection Complete */
1493                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1494                 events[1] |= 0x20; /* Command Complete */
1495                 events[1] |= 0x40; /* Command Status */
1496                 events[1] |= 0x80; /* Hardware Error */
1497                 events[2] |= 0x04; /* Number of Completed Packets */
1498                 events[3] |= 0x02; /* Data Buffer Overflow */
1499 
1500                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1501                         events[0] |= 0x80; /* Encryption Change */
1502                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
1503                 }
1504         }
1505 
1506         if (lmp_inq_rssi_capable(hdev))
1507                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1508 
1509         if (lmp_sniffsubr_capable(hdev))
1510                 events[5] |= 0x20; /* Sniff Subrating */
1511 
1512         if (lmp_pause_enc_capable(hdev))
1513                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1514 
1515         if (lmp_ext_inq_capable(hdev))
1516                 events[5] |= 0x40; /* Extended Inquiry Result */
1517 
1518         if (lmp_no_flush_capable(hdev))
1519                 events[7] |= 0x01; /* Enhanced Flush Complete */
1520 
1521         if (lmp_lsto_capable(hdev))
1522                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1523 
1524         if (lmp_ssp_capable(hdev)) {
1525                 events[6] |= 0x01;      /* IO Capability Request */
1526                 events[6] |= 0x02;      /* IO Capability Response */
1527                 events[6] |= 0x04;      /* User Confirmation Request */
1528                 events[6] |= 0x08;      /* User Passkey Request */
1529                 events[6] |= 0x10;      /* Remote OOB Data Request */
1530                 events[6] |= 0x20;      /* Simple Pairing Complete */
1531                 events[7] |= 0x04;      /* User Passkey Notification */
1532                 events[7] |= 0x08;      /* Keypress Notification */
1533                 events[7] |= 0x10;      /* Remote Host Supported
1534                                          * Features Notification
1535                                          */
1536         }
1537 
1538         if (lmp_le_capable(hdev))
1539                 events[7] |= 0x20;      /* LE Meta-Event */
1540 
1541         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1542 }
1543 
1544 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1545 {
1546         struct hci_dev *hdev = req->hdev;
1547 
1548         if (lmp_bredr_capable(hdev))
1549                 bredr_setup(req);
1550         else
1551                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1552 
1553         if (lmp_le_capable(hdev))
1554                 le_setup(req);
1555 
1556         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1557          * local supported commands HCI command.
1558          */
1559         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1560                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1561 
1562         if (lmp_ssp_capable(hdev)) {
1563                 /* When SSP is available, then the host features page
1564                  * should also be available as well. However some
1565                  * controllers list the max_page as 0 as long as SSP
1566                  * has not been enabled. To achieve proper debugging
1567                  * output, force the minimum max_page to 1 at least.
1568                  */
1569                 hdev->max_page = 0x01;
1570 
1571                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1572                         u8 mode = 0x01;
1573                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1574                                     sizeof(mode), &mode);
1575                 } else {
1576                         struct hci_cp_write_eir cp;
1577 
1578                         memset(hdev->eir, 0, sizeof(hdev->eir));
1579                         memset(&cp, 0, sizeof(cp));
1580 
1581                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1582                 }
1583         }
1584 
1585         if (lmp_inq_rssi_capable(hdev))
1586                 hci_setup_inquiry_mode(req);
1587 
1588         if (lmp_inq_tx_pwr_capable(hdev))
1589                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1590 
1591         if (lmp_ext_feat_capable(hdev)) {
1592                 struct hci_cp_read_local_ext_features cp;
1593 
1594                 cp.page = 0x01;
1595                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1596                             sizeof(cp), &cp);
1597         }
1598 
1599         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1600                 u8 enable = 1;
1601                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1602                             &enable);
1603         }
1604 }
1605 
1606 static void hci_setup_link_policy(struct hci_request *req)
1607 {
1608         struct hci_dev *hdev = req->hdev;
1609         struct hci_cp_write_def_link_policy cp;
1610         u16 link_policy = 0;
1611 
1612         if (lmp_rswitch_capable(hdev))
1613                 link_policy |= HCI_LP_RSWITCH;
1614         if (lmp_hold_capable(hdev))
1615                 link_policy |= HCI_LP_HOLD;
1616         if (lmp_sniff_capable(hdev))
1617                 link_policy |= HCI_LP_SNIFF;
1618         if (lmp_park_capable(hdev))
1619                 link_policy |= HCI_LP_PARK;
1620 
1621         cp.policy = cpu_to_le16(link_policy);
1622         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1623 }
1624 
1625 static void hci_set_le_support(struct hci_request *req)
1626 {
1627         struct hci_dev *hdev = req->hdev;
1628         struct hci_cp_write_le_host_supported cp;
1629 
1630         /* LE-only devices do not support explicit enablement */
1631         if (!lmp_bredr_capable(hdev))
1632                 return;
1633 
1634         memset(&cp, 0, sizeof(cp));
1635 
1636         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1637                 cp.le = 0x01;
1638                 cp.simul = 0x00;
1639         }
1640 
1641         if (cp.le != lmp_host_le_capable(hdev))
1642                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1643                             &cp);
1644 }
1645 
1646 static void hci_set_event_mask_page_2(struct hci_request *req)
1647 {
1648         struct hci_dev *hdev = req->hdev;
1649         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1650 
1651         /* If Connectionless Slave Broadcast master role is supported
1652          * enable all necessary events for it.
1653          */
1654         if (lmp_csb_master_capable(hdev)) {
1655                 events[1] |= 0x40;      /* Triggered Clock Capture */
1656                 events[1] |= 0x80;      /* Synchronization Train Complete */
1657                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1658                 events[2] |= 0x20;      /* CSB Channel Map Change */
1659         }
1660 
1661         /* If Connectionless Slave Broadcast slave role is supported,
1662          * enable all necessary events for it.
1663          */
1664         if (lmp_csb_slave_capable(hdev)) {
1665                 events[2] |= 0x01;      /* Synchronization Train Received */
1666                 events[2] |= 0x02;      /* CSB Receive */
1667                 events[2] |= 0x04;      /* CSB Timeout */
1668                 events[2] |= 0x08;      /* Truncated Page Complete */
1669         }
1670 
1671         /* Enable Authenticated Payload Timeout Expired event if supported */
1672         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1673                 events[2] |= 0x80;
1674 
1675         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1676 }
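
/* Illustrative note (added commentary, not part of the original file):
 * for a controller that supports the CSB master role plus LMP ping,
 * the mask built above ends up as events[1] == 0xc0 and
 * events[2] == 0xb0 (0x10 | 0x20 | 0x80), with all other bytes zero.
 */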
1677 
1678 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1679 {
1680         struct hci_dev *hdev = req->hdev;
1681         u8 p;
1682 
1683         hci_setup_event_mask(req);
1684 
1685         /* Some Broadcom-based Bluetooth controllers do not support the
1686          * Delete Stored Link Key command. They clearly indicate its
1687          * absence in the bit mask of supported commands.
1688          *
1689          * Check the supported commands and send the command only if it is
1690          * marked as supported. If it is not supported, assume that the
1691          * controller does not have actual support for stored link keys,
1692          * which makes this command redundant anyway.
1693          *
1694          * Some controllers indicate that they support handling deleting
1695          * stored link keys, but they don't. The quirk lets a driver
1696          * just disable this command.
1697          */
1698         if (hdev->commands[6] & 0x80 &&
1699             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1700                 struct hci_cp_delete_stored_link_key cp;
1701 
1702                 bacpy(&cp.bdaddr, BDADDR_ANY);
1703                 cp.delete_all = 0x01;
1704                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1705                             sizeof(cp), &cp);
1706         }
1707 
1708         if (hdev->commands[5] & 0x10)
1709                 hci_setup_link_policy(req);
1710 
1711         if (hdev->commands[8] & 0x01)
1712                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1713 
1714         /* Some older Broadcom-based Bluetooth 1.2 controllers do not
1715          * support the Read Page Scan Type command. Check support for
1716          * this command in the bit mask of supported commands.
1717          */
1718         if (hdev->commands[13] & 0x01)
1719                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1720 
1721         if (lmp_le_capable(hdev)) {
1722                 u8 events[8];
1723 
1724                 memset(events, 0, sizeof(events));
1725                 events[0] = 0x0f;
1726 
1727                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728                         events[0] |= 0x10;      /* LE Long Term Key Request */
1729 
1730                 /* If controller supports the Connection Parameters Request
1731                  * Link Layer Procedure, enable the corresponding event.
1732                  */
1733                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734                         events[0] |= 0x20;      /* LE Remote Connection
1735                                                  * Parameter Request
1736                                                  */
1737 
1738                 /* If the controller supports Extended Scanner Filter
1739          * Policies, enable the corresponding event.
1740                  */
1741                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1742                         events[1] |= 0x04;      /* LE Direct Advertising
1743                                                  * Report
1744                                                  */
1745 
1746                 /* If the controller supports the LE Read Local P-256
1747                  * Public Key command, enable the corresponding event.
1748                  */
1749                 if (hdev->commands[34] & 0x02)
1750                         events[0] |= 0x80;      /* LE Read Local P-256
1751                                                  * Public Key Complete
1752                                                  */
1753 
1754                 /* If the controller supports the LE Generate DHKey
1755                  * command, enable the corresponding event.
1756                  */
1757                 if (hdev->commands[34] & 0x04)
1758                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
1759 
1760                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1761                             events);
1762 
1763                 if (hdev->commands[25] & 0x40) {
1764                         /* Read LE Advertising Channel TX Power */
1765                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1766                 }
1767 
1768                 hci_set_le_support(req);
1769         }
1770 
1771         /* Read features beyond page 1 if available */
1772         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1773                 struct hci_cp_read_local_ext_features cp;
1774 
1775                 cp.page = p;
1776                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1777                             sizeof(cp), &cp);
1778         }
1779 }
1780 
1781 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1782 {
1783         struct hci_dev *hdev = req->hdev;
1784 
1785         /* Set event mask page 2 if the HCI command for it is supported */
1786         if (hdev->commands[22] & 0x04)
1787                 hci_set_event_mask_page_2(req);
1788 
1789         /* Read local codec list if the HCI command is supported */
1790         if (hdev->commands[29] & 0x20)
1791                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1792 
1793         /* Get MWS transport configuration if the HCI command is supported */
1794         if (hdev->commands[30] & 0x08)
1795                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1796 
1797         /* Check for Synchronization Train support */
1798         if (lmp_sync_train_capable(hdev))
1799                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1800 
1801         /* Enable Secure Connections if supported and configured */
1802         if (bredr_sc_enabled(hdev)) {
1803                 u8 support = 0x01;
1804                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1805                             sizeof(support), &support);
1806         }
1807 }
1808 
1809 static int __hci_init(struct hci_dev *hdev)
1810 {
1811         int err;
1812 
1813         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1814         if (err < 0)
1815                 return err;
1816 
1817         /* The Device Under Test (DUT) mode is special and available for
1818          * all controller types. So just create it early on.
1819          */
1820         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1821                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1822                                     &dut_mode_fops);
1823         }
1824 
1825         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
1826          * BR/EDR/LE type controllers. AMP controllers only need the
1827          * first stage init.
1828          */
1829         if (hdev->dev_type != HCI_BREDR)
1830                 return 0;
1831 
1832         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1833         if (err < 0)
1834                 return err;
1835 
1836         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1837         if (err < 0)
1838                 return err;
1839 
1840         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1841         if (err < 0)
1842                 return err;
1843 
1844         /* Only create debugfs entries during the initial setup
1845          * phase and not every time the controller gets powered on.
1846          */
1847         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1848                 return 0;
1849 
1850         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1851                             &features_fops);
1852         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1853                            &hdev->manufacturer);
1854         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1855         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1856         debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1857                             &device_list_fops);
1858         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1859                             &blacklist_fops);
1860         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1861 
1862         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1863                             &conn_info_min_age_fops);
1864         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1865                             &conn_info_max_age_fops);
1866 
1867         if (lmp_bredr_capable(hdev)) {
1868                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1869                                     hdev, &inquiry_cache_fops);
1870                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1871                                     hdev, &link_keys_fops);
1872                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1873                                     hdev, &dev_class_fops);
1874                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1875                                     hdev, &voice_setting_fops);
1876         }
1877 
1878         if (lmp_ssp_capable(hdev)) {
1879                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1880                                     hdev, &auto_accept_delay_fops);
1881                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1882                                     hdev, &force_sc_support_fops);
1883                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1884                                     hdev, &sc_only_mode_fops);
1885                 if (lmp_le_capable(hdev))
1886                         debugfs_create_file("force_lesc_support", 0644,
1887                                             hdev->debugfs, hdev,
1888                                             &force_lesc_support_fops);
1889         }
1890 
1891         if (lmp_sniff_capable(hdev)) {
1892                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1893                                     hdev, &idle_timeout_fops);
1894                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1895                                     hdev, &sniff_min_interval_fops);
1896                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1897                                     hdev, &sniff_max_interval_fops);
1898         }
1899 
1900         if (lmp_le_capable(hdev)) {
1901                 debugfs_create_file("identity", 0400, hdev->debugfs,
1902                                     hdev, &identity_fops);
1903                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1904                                     hdev, &rpa_timeout_fops);
1905                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1906                                     hdev, &random_address_fops);
1907                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1908                                     hdev, &static_address_fops);
1909 
1910                 /* For controllers with a public address, provide a debug
1911                  * option to force the usage of the configured static
1912                  * address. By default the public address is used.
1913                  */
1914                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1915                         debugfs_create_file("force_static_address", 0644,
1916                                             hdev->debugfs, hdev,
1917                                             &force_static_address_fops);
1918 
1919                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1920                                   &hdev->le_white_list_size);
1921                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1922                                     &white_list_fops);
1923                 debugfs_create_file("identity_resolving_keys", 0400,
1924                                     hdev->debugfs, hdev,
1925                                     &identity_resolving_keys_fops);
1926                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1927                                     hdev, &long_term_keys_fops);
1928                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1929                                     hdev, &conn_min_interval_fops);
1930                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1931                                     hdev, &conn_max_interval_fops);
1932                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1933                                     hdev, &conn_latency_fops);
1934                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1935                                     hdev, &supervision_timeout_fops);
1936                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1937                                     hdev, &adv_channel_map_fops);
1938                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1939                                     hdev, &adv_min_interval_fops);
1940                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1941                                     hdev, &adv_max_interval_fops);
1942                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1943                                    hdev->debugfs,
1944                                    &hdev->discov_interleaved_timeout);
1945 
1946                 smp_register(hdev);
1947         }
1948 
1949         return 0;
1950 }
1951 
1952 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1953 {
1954         struct hci_dev *hdev = req->hdev;
1955 
1956         BT_DBG("%s %ld", hdev->name, opt);
1957 
1958         /* Reset */
1959         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1960                 hci_reset_req(req, 0);
1961 
1962         /* Read Local Version */
1963         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1964 
1965         /* Read BD Address */
1966         if (hdev->set_bdaddr)
1967                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1968 }
1969 
1970 static int __hci_unconf_init(struct hci_dev *hdev)
1971 {
1972         int err;
1973 
1974         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1975                 return 0;
1976 
1977         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1978         if (err < 0)
1979                 return err;
1980 
1981         return 0;
1982 }
1983 
1984 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1985 {
1986         __u8 scan = opt;
1987 
1988         BT_DBG("%s %x", req->hdev->name, scan);
1989 
1990         /* Inquiry and Page scans */
1991         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1992 }
1993 
1994 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1995 {
1996         __u8 auth = opt;
1997 
1998         BT_DBG("%s %x", req->hdev->name, auth);
1999 
2000         /* Authentication */
2001         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
2002 }
2003 
2004 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
2005 {
2006         __u8 encrypt = opt;
2007 
2008         BT_DBG("%s %x", req->hdev->name, encrypt);
2009 
2010         /* Encryption */
2011         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
2012 }
2013 
2014 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
2015 {
2016         __le16 policy = cpu_to_le16(opt);
2017 
2018         BT_DBG("%s %x", req->hdev->name, policy);
2019 
2020         /* Default link policy */
2021         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2022 }
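
/* Illustrative note (added commentary, not part of the original file):
 * these one-command request builders are driven through hci_req_sync(),
 * which serializes against hdev->req_lock and waits for the command to
 * complete. The ioctl handlers below use exactly this pattern, e.g.:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * enables both page and inquiry scanning on hdev.
 */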
2023 
2024 /* Get HCI device by index.
2025  * Device is held on return. */
2026 struct hci_dev *hci_dev_get(int index)
2027 {
2028         struct hci_dev *hdev = NULL, *d;
2029 
2030         BT_DBG("%d", index);
2031 
2032         if (index < 0)
2033                 return NULL;
2034 
2035         read_lock(&hci_dev_list_lock);
2036         list_for_each_entry(d, &hci_dev_list, list) {
2037                 if (d->id == index) {
2038                         hdev = hci_dev_hold(d);
2039                         break;
2040                 }
2041         }
2042         read_unlock(&hci_dev_list_lock);
2043         return hdev;
2044 }
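
/* Illustrative sketch (added commentary, not part of the original file):
 * a successful hci_dev_get() must be balanced with hci_dev_put() once
 * the caller is done, otherwise the reference count never drops and
 * the hci_dev cannot be freed:
 */
#if 0 /* example only */
static int example_use_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	/* ... use hdev while the reference is held ... */

	hci_dev_put(hdev);
	return 0;
}
#endif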
2045 
2046 /* ---- Inquiry support ---- */
2047 
2048 bool hci_discovery_active(struct hci_dev *hdev)
2049 {
2050         struct discovery_state *discov = &hdev->discovery;
2051 
2052         switch (discov->state) {
2053         case DISCOVERY_FINDING:
2054         case DISCOVERY_RESOLVING:
2055                 return true;
2056 
2057         default:
2058                 return false;
2059         }
2060 }
2061 
2062 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2063 {
2064         int old_state = hdev->discovery.state;
2065 
2066         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2067 
2068         if (old_state == state)
2069                 return;
2070 
2071         hdev->discovery.state = state;
2072 
2073         switch (state) {
2074         case DISCOVERY_STOPPED:
2075                 hci_update_background_scan(hdev);
2076 
2077                 if (old_state != DISCOVERY_STARTING)
2078                         mgmt_discovering(hdev, 0);
2079                 break;
2080         case DISCOVERY_STARTING:
2081                 break;
2082         case DISCOVERY_FINDING:
2083                 mgmt_discovering(hdev, 1);
2084                 break;
2085         case DISCOVERY_RESOLVING:
2086                 break;
2087         case DISCOVERY_STOPPING:
2088                 break;
2089         }
2090 }
2091 
2092 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2093 {
2094         struct discovery_state *cache = &hdev->discovery;
2095         struct inquiry_entry *p, *n;
2096 
2097         list_for_each_entry_safe(p, n, &cache->all, all) {
2098                 list_del(&p->all);
2099                 kfree(p);
2100         }
2101 
2102         INIT_LIST_HEAD(&cache->unknown);
2103         INIT_LIST_HEAD(&cache->resolve);
2104 }
2105 
2106 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2107                                                bdaddr_t *bdaddr)
2108 {
2109         struct discovery_state *cache = &hdev->discovery;
2110         struct inquiry_entry *e;
2111 
2112         BT_DBG("cache %p, %pMR", cache, bdaddr);
2113 
2114         list_for_each_entry(e, &cache->all, all) {
2115                 if (!bacmp(&e->data.bdaddr, bdaddr))
2116                         return e;
2117         }
2118 
2119         return NULL;
2120 }
2121 
2122 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2123                                                        bdaddr_t *bdaddr)
2124 {
2125         struct discovery_state *cache = &hdev->discovery;
2126         struct inquiry_entry *e;
2127 
2128         BT_DBG("cache %p, %pMR", cache, bdaddr);
2129 
2130         list_for_each_entry(e, &cache->unknown, list) {
2131                 if (!bacmp(&e->data.bdaddr, bdaddr))
2132                         return e;
2133         }
2134 
2135         return NULL;
2136 }
2137 
2138 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2139                                                        bdaddr_t *bdaddr,
2140                                                        int state)
2141 {
2142         struct discovery_state *cache = &hdev->discovery;
2143         struct inquiry_entry *e;
2144 
2145         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2146 
2147         list_for_each_entry(e, &cache->resolve, list) {
2148                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2149                         return e;
2150                 if (!bacmp(&e->data.bdaddr, bdaddr))
2151                         return e;
2152         }
2153 
2154         return NULL;
2155 }
2156 
2157 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2158                                       struct inquiry_entry *ie)
2159 {
2160         struct discovery_state *cache = &hdev->discovery;
2161         struct list_head *pos = &cache->resolve;
2162         struct inquiry_entry *p;
2163 
2164         list_del(&ie->list);
2165 
2166         list_for_each_entry(p, &cache->resolve, list) {
2167                 if (p->name_state != NAME_PENDING &&
2168                     abs(p->data.rssi) >= abs(ie->data.rssi))
2169                         break;
2170                 pos = &p->list;
2171         }
2172 
2173         list_add(&ie->list, pos);
2174 }
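
/* Illustrative note (added commentary, not part of the original file):
 * RSSI values are negative dBm figures, so ordering by ascending
 * abs(rssi) keeps the strongest devices at the head of the resolve
 * list; entries with RSSI -40, -55 and -70 are name-resolved in that
 * order. Entries already in NAME_PENDING state are skipped when
 * choosing the insertion point.
 */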
2175 
2176 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2177                              bool name_known)
2178 {
2179         struct discovery_state *cache = &hdev->discovery;
2180         struct inquiry_entry *ie;
2181         u32 flags = 0;
2182 
2183         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2184 
2185         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2186 
2187         if (!data->ssp_mode)
2188                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2189 
2190         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2191         if (ie) {
2192                 if (!ie->data.ssp_mode)
2193                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2194 
2195                 if (ie->name_state == NAME_NEEDED &&
2196                     data->rssi != ie->data.rssi) {
2197                         ie->data.rssi = data->rssi;
2198                         hci_inquiry_cache_update_resolve(hdev, ie);
2199                 }
2200 
2201                 goto update;
2202         }
2203 
2204         /* Entry not in the cache. Add a new one. */
2205         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2206         if (!ie) {
2207                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2208                 goto done;
2209         }
2210 
2211         list_add(&ie->all, &cache->all);
2212 
2213         if (name_known) {
2214                 ie->name_state = NAME_KNOWN;
2215         } else {
2216                 ie->name_state = NAME_NOT_KNOWN;
2217                 list_add(&ie->list, &cache->unknown);
2218         }
2219 
2220 update:
2221         if (name_known && ie->name_state != NAME_KNOWN &&
2222             ie->name_state != NAME_PENDING) {
2223                 ie->name_state = NAME_KNOWN;
2224                 list_del(&ie->list);
2225         }
2226 
2227         memcpy(&ie->data, data, sizeof(*data));
2228         ie->timestamp = jiffies;
2229         cache->timestamp = jiffies;
2230 
2231         if (ie->name_state == NAME_NOT_KNOWN)
2232                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2233 
2234 done:
2235         return flags;
2236 }
2237 
2238 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2239 {
2240         struct discovery_state *cache = &hdev->discovery;
2241         struct inquiry_info *info = (struct inquiry_info *) buf;
2242         struct inquiry_entry *e;
2243         int copied = 0;
2244 
2245         list_for_each_entry(e, &cache->all, all) {
2246                 struct inquiry_data *data = &e->data;
2247 
2248                 if (copied >= num)
2249                         break;
2250 
2251                 bacpy(&info->bdaddr, &data->bdaddr);
2252                 info->pscan_rep_mode    = data->pscan_rep_mode;
2253                 info->pscan_period_mode = data->pscan_period_mode;
2254                 info->pscan_mode        = data->pscan_mode;
2255                 memcpy(info->dev_class, data->dev_class, 3);
2256                 info->clock_offset      = data->clock_offset;
2257 
2258                 info++;
2259                 copied++;
2260         }
2261 
2262         BT_DBG("cache %p, copied %d", cache, copied);
2263         return copied;
2264 }
2265 
2266 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2267 {
2268         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2269         struct hci_dev *hdev = req->hdev;
2270         struct hci_cp_inquiry cp;
2271 
2272         BT_DBG("%s", hdev->name);
2273 
2274         if (test_bit(HCI_INQUIRY, &hdev->flags))
2275                 return;
2276 
2277         /* Start Inquiry */
2278         memcpy(&cp.lap, &ir->lap, 3);
2279         cp.length  = ir->length;
2280         cp.num_rsp = ir->num_rsp;
2281         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2282 }
2283 
2284 int hci_inquiry(void __user *arg)
2285 {
2286         __u8 __user *ptr = arg;
2287         struct hci_inquiry_req ir;
2288         struct hci_dev *hdev;
2289         int err = 0, do_inquiry = 0, max_rsp;
2290         long timeo;
2291         __u8 *buf;
2292 
2293         if (copy_from_user(&ir, ptr, sizeof(ir)))
2294                 return -EFAULT;
2295 
2296         hdev = hci_dev_get(ir.dev_id);
2297         if (!hdev)
2298                 return -ENODEV;
2299 
2300         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2301                 err = -EBUSY;
2302                 goto done;
2303         }
2304 
2305         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2306                 err = -EOPNOTSUPP;
2307                 goto done;
2308         }
2309 
2310         if (hdev->dev_type != HCI_BREDR) {
2311                 err = -EOPNOTSUPP;
2312                 goto done;
2313         }
2314 
2315         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2316                 err = -EOPNOTSUPP;
2317                 goto done;
2318         }
2319 
2320         hci_dev_lock(hdev);
2321         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2322             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2323                 hci_inquiry_cache_flush(hdev);
2324                 do_inquiry = 1;
2325         }
2326         hci_dev_unlock(hdev);
2327 
2328         timeo = ir.length * msecs_to_jiffies(2000);
2329 
2330         if (do_inquiry) {
2331                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2332                                    timeo);
2333                 if (err < 0)
2334                         goto done;
2335 
2336                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2337                  * cleared). If it is interrupted by a signal, return -EINTR.
2338                  */
2339                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2340                                 TASK_INTERRUPTIBLE))
2341                         return -EINTR;
2342         }
2343 
2344         /* For an unlimited number of responses, use a buffer with
2345          * 255 entries.
2346          */
2347         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2348 
2349         /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2350          * and then copy it to user space.
2351          */
2352         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2353         if (!buf) {
2354                 err = -ENOMEM;
2355                 goto done;
2356         }
2357 
2358         hci_dev_lock(hdev);
2359         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2360         hci_dev_unlock(hdev);
2361 
2362         BT_DBG("num_rsp %d", ir.num_rsp);
2363 
2364         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2365                 ptr += sizeof(ir);
2366                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2367                                  ir.num_rsp))
2368                         err = -EFAULT;
2369         } else
2370                 err = -EFAULT;
2371 
2372         kfree(buf);
2373 
2374 done:
2375         hci_dev_put(hdev);
2376         return err;
2377 }
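
/* Illustrative sketch (added commentary, not part of the original file):
 * hci_inquiry() is reached from userspace via the HCIINQUIRY ioctl on
 * a raw HCI socket. A minimal caller, assuming the usual definitions
 * from the userspace bluetooth/hci.h header and an already opened
 * socket descriptor hci_sock_fd, might look like this:
 */
#if 0 /* userspace example only */
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} buf;

	memset(&buf, 0, sizeof(buf));
	buf.ir.dev_id  = 0;			/* hci0 */
	buf.ir.flags   = IREQ_CACHE_FLUSH;	/* force a fresh inquiry */
	buf.ir.length  = 8;			/* 8 * 1.28s inquiry window */
	buf.ir.num_rsp = 8;			/* at most 8 responses */
	memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3); /* GIAC */

	if (ioctl(hci_sock_fd, HCIINQUIRY, (unsigned long) &buf) < 0)
		perror("HCIINQUIRY");
#endif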
2378 
2379 static int hci_dev_do_open(struct hci_dev *hdev)
2380 {
2381         int ret = 0;
2382 
2383         BT_DBG("%s %p", hdev->name, hdev);
2384 
2385         hci_req_lock(hdev);
2386 
2387         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2388                 ret = -ENODEV;
2389                 goto done;
2390         }
2391 
2392         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2393             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394                 /* Check for rfkill but allow the HCI setup stage to
2395                  * proceed (which in itself doesn't cause any RF activity).
2396                  */
2397                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2398                         ret = -ERFKILL;
2399                         goto done;
2400                 }
2401 
2402                 /* Check for valid public address or a configured static
2403                  * random address, but let the HCI setup proceed to
2404                  * be able to determine if there is a public address
2405                  * or not.
2406                  *
2407                  * In case of user channel usage, it is not important
2408                  * if a public address or static random address is
2409                  * available.
2410                  *
2411                  * This check is only valid for BR/EDR controllers
2412                  * since AMP controllers do not have an address.
2413                  */
2414                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2415                     hdev->dev_type == HCI_BREDR &&
2416                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2417                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2418                         ret = -EADDRNOTAVAIL;
2419                         goto done;
2420                 }
2421         }
2422 
2423         if (test_bit(HCI_UP, &hdev->flags)) {
2424                 ret = -EALREADY;
2425                 goto done;
2426         }
2427 
2428         if (hdev->open(hdev)) {
2429                 ret = -EIO;
2430                 goto done;
2431         }
2432 
2433         atomic_set(&hdev->cmd_cnt, 1);
2434         set_bit(HCI_INIT, &hdev->flags);
2435 
2436         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2437                 if (hdev->setup)
2438                         ret = hdev->setup(hdev);
2439 
2440                 /* The transport driver can set these quirks before
2441                  * creating the HCI device or in its setup callback.
2442                  *
2443                  * In case any of them is set, the controller has to
2444                  * start up as unconfigured.
2445                  */
2446                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2447                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2448                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2449 
2450                 /* For an unconfigured controller, it is required to
2451                  * read at least the version information provided by
2452                  * the Read Local Version Information command.
2453                  *
2454                  * If the set_bdaddr driver callback is provided, then
2455                  * also the original Bluetooth public device address
2456                  * will be read using the Read BD Address command.
2457                  */
2458                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2459                         ret = __hci_unconf_init(hdev);
2460         }
2461 
2462         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2463                 /* If public address change is configured, ensure that
2464                  * the address gets programmed. If the driver does not
2465                  * support changing the public address, fail the power
2466                  * on procedure.
2467                  */
2468                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2469                     hdev->set_bdaddr)
2470                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2471                 else
2472                         ret = -EADDRNOTAVAIL;
2473         }
2474 
2475         if (!ret) {
2476                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2478                         ret = __hci_init(hdev);
2479         }
2480 
2481         clear_bit(HCI_INIT, &hdev->flags);
2482 
2483         if (!ret) {
2484                 hci_dev_hold(hdev);
2485                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2486                 set_bit(HCI_UP, &hdev->flags);
2487                 hci_notify(hdev, HCI_DEV_UP);
2488                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2489                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2490                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2491                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2492                     hdev->dev_type == HCI_BREDR) {
2493                         hci_dev_lock(hdev);
2494                         mgmt_powered(hdev, 1);
2495                         hci_dev_unlock(hdev);
2496                 }
2497         } else {
2498                 /* Init failed, cleanup */
2499                 flush_work(&hdev->tx_work);
2500                 flush_work(&hdev->cmd_work);
2501                 flush_work(&hdev->rx_work);
2502 
2503                 skb_queue_purge(&hdev->cmd_q);
2504                 skb_queue_purge(&hdev->rx_q);
2505 
2506                 if (hdev->flush)
2507                         hdev->flush(hdev);
2508 
2509                 if (hdev->sent_cmd) {
2510                         kfree_skb(hdev->sent_cmd);
2511                         hdev->sent_cmd = NULL;
2512                 }
2513 
2514                 hdev->close(hdev);
2515                 hdev->flags &= BIT(HCI_RAW);
2516         }
2517 
2518 done:
2519         hci_req_unlock(hdev);
2520         return ret;
2521 }
2522 
2523 /* ---- HCI ioctl helpers ---- */
2524 
2525 int hci_dev_open(__u16 dev)
2526 {
2527         struct hci_dev *hdev;
2528         int err;
2529 
2530         hdev = hci_dev_get(dev);
2531         if (!hdev)
2532                 return -ENODEV;
2533 
2534         /* Devices that are marked as unconfigured can only be powered
2535          * up as user channel. Trying to bring them up as normal devices
2536          * will result in a failure. Only user channel operation is
2537          * possible.
2538          *
2539          * When this function is called for a user channel, the flag
2540          * HCI_USER_CHANNEL will be set first before attempting to
2541          * open the device.
2542          */
2543         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2544             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545                 err = -EOPNOTSUPP;
2546                 goto done;
2547         }
2548 
2549         /* We need to ensure that no other power on/off work is pending
2550          * before proceeding to call hci_dev_do_open. This is
2551          * particularly important if the setup procedure has not yet
2552          * completed.
2553          */
2554         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2555                 cancel_delayed_work(&hdev->power_off);
2556 
2557         /* After this call it is guaranteed that the setup procedure
2558          * has finished. This means that error conditions like RFKILL
2559          * or no valid public or static random address apply.
2560          */
2561         flush_workqueue(hdev->req_workqueue);
2562 
2563         /* For controllers not using the management interface and that
2564          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2565          * so that pairing works for them. Once the management interface
2566          * is in use this bit will be cleared again and userspace has
2567          * to explicitly enable it.
2568          */
2569         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2570             !test_bit(HCI_MGMT, &hdev->dev_flags))
2571                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2572 
2573         err = hci_dev_do_open(hdev);
2574 
2575 done:
2576         hci_dev_put(hdev);
2577         return err;
2578 }
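
/* Illustrative sketch (added commentary, not part of the original file):
 * this legacy path corresponds to the HCIDEVUP ioctl (HCIDEVDOWN is
 * the inverse), issued on a raw HCI control socket. Assuming the usual
 * bluetooth headers, a minimal userspace caller would be:
 */
#if 0 /* userspace example only */
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
		perror("HCIDEVUP hci0");
#endif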
2579 
2580 /* This function requires the caller holds hdev->lock */
2581 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2582 {
2583         struct hci_conn_params *p;
2584 
2585         list_for_each_entry(p, &hdev->le_conn_params, list) {
2586                 if (p->conn) {
2587                         hci_conn_drop(p->conn);
2588                         hci_conn_put(p->conn);
2589                         p->conn = NULL;
2590                 }
2591                 list_del_init(&p->action);
2592         }
2593 
2594         BT_DBG("All LE pending actions cleared");
2595 }
2596 
2597 static int hci_dev_do_close(struct hci_dev *hdev)
2598 {
2599         BT_DBG("%s %p", hdev->name, hdev);
2600 
2601         cancel_delayed_work(&hdev->power_off);
2602 
2603         hci_req_cancel(hdev, ENODEV);
2604         hci_req_lock(hdev);
2605 
2606         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2607                 cancel_delayed_work_sync(&hdev->cmd_timer);
2608                 hci_req_unlock(hdev);
2609                 return 0;
2610         }
2611 
2612         /* Flush RX and TX works */
2613         flush_work(&hdev->tx_work);
2614         flush_work(&hdev->rx_work);
2615 
2616         if (hdev->discov_timeout > 0) {
2617                 cancel_delayed_work(&hdev->discov_off);
2618                 hdev->discov_timeout = 0;
2619                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2620                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2621         }
2622 
2623         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2624                 cancel_delayed_work(&hdev->service_cache);
2625 
2626         cancel_delayed_work_sync(&hdev->le_scan_disable);
2627 
2628         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2629                 cancel_delayed_work_sync(&hdev->rpa_expired);
2630 
2631         /* Avoid potential lockdep warnings from the *_flush() calls by
2632          * ensuring the workqueue is empty up front.
2633          */
2634         drain_workqueue(hdev->workqueue);
2635 
2636         hci_dev_lock(hdev);
2637 
2638         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2639                 if (hdev->dev_type == HCI_BREDR)
2640                         mgmt_powered(hdev, 0);
2641         }
2642 
2643         hci_inquiry_cache_flush(hdev);
2644         hci_pend_le_actions_clear(hdev);
2645         hci_conn_hash_flush(hdev);
2646         hci_dev_unlock(hdev);
2647 
2648         hci_notify(hdev, HCI_DEV_DOWN);
2649 
2650         if (hdev->flush)
2651                 hdev->flush(hdev);
2652 
2653         /* Reset device */
2654         skb_queue_purge(&hdev->cmd_q);
2655         atomic_set(&hdev->cmd_cnt, 1);
2656         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2657             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2658             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2659                 set_bit(HCI_INIT, &hdev->flags);
2660                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2661                 clear_bit(HCI_INIT, &hdev->flags);
2662         }
2663 
2664         /* Flush cmd work */
2665         flush_work(&hdev->cmd_work);
2666 
2667         /* Drop queues */
2668         skb_queue_purge(&hdev->rx_q);
2669         skb_queue_purge(&hdev->cmd_q);
2670         skb_queue_purge(&hdev->raw_q);
2671 
2672         /* Drop last sent command */
2673         if (hdev->sent_cmd) {
2674                 cancel_delayed_work_sync(&hdev->cmd_timer);
2675                 kfree_skb(hdev->sent_cmd);
2676                 hdev->sent_cmd = NULL;
2677         }
2678 
2679         kfree_skb(hdev->recv_evt);
2680         hdev->recv_evt = NULL;
2681 
2682         /* After this point our queues are empty
2683          * and no tasks are scheduled. */
2684         hdev->close(hdev);
2685 
2686         /* Clear flags */
2687         hdev->flags &= BIT(HCI_RAW);
2688         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2689 
2690         /* Controller radio is available but is currently powered down */
2691         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2692 
2693         memset(hdev->eir, 0, sizeof(hdev->eir));
2694         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2695         bacpy(&hdev->random_addr, BDADDR_ANY);
2696 
2697         hci_req_unlock(hdev);
2698 
2699         hci_dev_put(hdev);
2700         return 0;
2701 }
2702 
2703 int hci_dev_close(__u16 dev)
2704 {
2705         struct hci_dev *hdev;
2706         int err;
2707 
2708         hdev = hci_dev_get(dev);
2709         if (!hdev)
2710                 return -ENODEV;
2711 
2712         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2713                 err = -EBUSY;
2714                 goto done;
2715         }
2716 
2717         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2718                 cancel_delayed_work(&hdev->power_off);
2719 
2720         err = hci_dev_do_close(hdev);
2721 
2722 done:
2723         hci_dev_put(hdev);
2724         return err;
2725 }
2726 
2727 int hci_dev_reset(__u16 dev)
2728 {
2729         struct hci_dev *hdev;
2730         int ret = 0;
2731 
2732         hdev = hci_dev_get(dev);
2733         if (!hdev)
2734                 return -ENODEV;
2735 
2736         hci_req_lock(hdev);
2737 
2738         if (!test_bit(HCI_UP, &hdev->flags)) {
2739                 ret = -ENETDOWN;
2740                 goto done;
2741         }
2742 
2743         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2744                 ret = -EBUSY;
2745                 goto done;
2746         }
2747 
2748         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2749                 ret = -EOPNOTSUPP;
2750                 goto done;
2751         }
2752 
2753         /* Drop queues */
2754         skb_queue_purge(&hdev->rx_q);
2755         skb_queue_purge(&hdev->cmd_q);
2756 
2757         /* Avoid potential lockdep warnings from the *_flush() calls by
2758          * ensuring the workqueue is empty up front.
2759          */
2760         drain_workqueue(hdev->workqueue);
2761 
2762         hci_dev_lock(hdev);
2763         hci_inquiry_cache_flush(hdev);
2764         hci_conn_hash_flush(hdev);
2765         hci_dev_unlock(hdev);
2766 
2767         if (hdev->flush)
2768                 hdev->flush(hdev);
2769 
2770         atomic_set(&hdev->cmd_cnt, 1);
2771         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2772 
2773         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2774 
2775 done:
2776         hci_req_unlock(hdev);
2777         hci_dev_put(hdev);
2778         return ret;
2779 }
2780 
2781 int hci_dev_reset_stat(__u16 dev)
2782 {
2783         struct hci_dev *hdev;
2784         int ret = 0;
2785 
2786         hdev = hci_dev_get(dev);
2787         if (!hdev)
2788                 return -ENODEV;
2789 
2790         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2791                 ret = -EBUSY;
2792                 goto done;
2793         }
2794 
2795         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2796                 ret = -EOPNOTSUPP;
2797                 goto done;
2798         }
2799 
2800         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2801 
2802 done:
2803         hci_dev_put(hdev);
2804         return ret;
2805 }
2806 
2807 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2808 {
2809         bool conn_changed, discov_changed;
2810 
2811         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2812 
2813         if ((scan & SCAN_PAGE))
2814                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2815                                                  &hdev->dev_flags);
2816         else
2817                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2818                                                   &hdev->dev_flags);
2819 
2820         if ((scan & SCAN_INQUIRY)) {
2821                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2822                                                    &hdev->dev_flags);
2823         } else {
2824                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2825                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2826                                                     &hdev->dev_flags);
2827         }
2828 
2829         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2830                 return;
2831 
2832         if (conn_changed || discov_changed) {
2833                 /* In case this was disabled through mgmt */
2834                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2835 
2836                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2837                         mgmt_update_adv_data(hdev);
2838 
2839                 mgmt_new_settings(hdev);
2840         }
2841 }
2842 
2843 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2844 {
2845         struct hci_dev *hdev;
2846         struct hci_dev_req dr;
2847         int err = 0;
2848 
2849         if (copy_from_user(&dr, arg, sizeof(dr)))
2850                 return -EFAULT;
2851 
2852         hdev = hci_dev_get(dr.dev_id);
2853         if (!hdev)
2854                 return -ENODEV;
2855 
2856         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2857                 err = -EBUSY;
2858                 goto done;
2859         }
2860 
2861         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2862                 err = -EOPNOTSUPP;
2863                 goto done;
2864         }
2865 
2866         if (hdev->dev_type != HCI_BREDR) {
2867                 err = -EOPNOTSUPP;
2868                 goto done;
2869         }
2870 
2871         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2872                 err = -EOPNOTSUPP;
2873                 goto done;
2874         }
2875 
2876         switch (cmd) {
2877         case HCISETAUTH:
2878                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2879                                    HCI_INIT_TIMEOUT);
2880                 break;
2881 
2882         case HCISETENCRYPT:
2883                 if (!lmp_encrypt_capable(hdev)) {
2884                         err = -EOPNOTSUPP;
2885                         break;
2886                 }
2887 
2888                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2889                         /* Auth must be enabled first */
2890                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2891                                            HCI_INIT_TIMEOUT);
2892                         if (err)
2893                                 break;
2894                 }
2895 
2896                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2897                                    HCI_INIT_TIMEOUT);
2898                 break;
2899 
2900         case HCISETSCAN:
2901                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2902                                    HCI_INIT_TIMEOUT);
2903 
2904                 /* Ensure that the connectable and discoverable states
2905                  * get correctly modified as this was a non-mgmt change.
2906                  */
2907                 if (!err)
2908                         hci_update_scan_state(hdev, dr.dev_opt);
2909                 break;
2910 
2911         case HCISETLINKPOL:
2912                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2913                                    HCI_INIT_TIMEOUT);
2914                 break;
2915 
2916         case HCISETLINKMODE:
2917                 hdev->link_mode = ((__u16) dr.dev_opt) &
2918                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2919                 break;
2920 
2921         case HCISETPTYPE:
2922                 hdev->pkt_type = (__u16) dr.dev_opt;
2923                 break;
2924 
2925         case HCISETACLMTU:
2926                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2927                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2928                 break;
2929 
2930         case HCISETSCOMTU:
2931                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2932                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2933                 break;
2934 
2935         default:
2936                 err = -EINVAL;
2937                 break;
2938         }
2939 
2940 done:
2941         hci_dev_put(hdev);
2942         return err;
2943 }
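
/* Illustrative sketch (added commentary, not part of the original file):
 * the HCISETSCAN branch above is what legacy "hciconfig hci0 piscan"
 * style tooling relies on. Assuming the usual bluetooth headers and an
 * open control socket ctl, the userspace side is roughly:
 */
#if 0 /* userspace example only */
	struct hci_dev_req dr = {
		.dev_id  = 0,				/* hci0 */
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	/* connectable + discoverable */
	};

	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
		perror("HCISETSCAN");
#endif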
2944 
2945 int hci_get_dev_list(void __user *arg)
2946 {
2947         struct hci_dev *hdev;
2948         struct hci_dev_list_req *dl;
2949         struct hci_dev_req *dr;
2950         int n = 0, size, err;
2951         __u16 dev_num;
2952 
2953         if (get_user(dev_num, (__u16 __user *) arg))
2954                 return -EFAULT;
2955 
2956         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2957                 return -EINVAL;
2958 
2959         size = sizeof(*dl) + dev_num * sizeof(*dr);
2960 
2961         dl = kzalloc(size, GFP_KERNEL);
2962         if (!dl)
2963                 return -ENOMEM;
2964 
2965         dr = dl->dev_req;
2966 
2967         read_lock(&hci_dev_list_lock);
2968         list_for_each_entry(hdev, &hci_dev_list, list) {
2969                 unsigned long flags = hdev->flags;
2970 
2971                 /* When the auto-off is configured, it means the transport
2972                  * is running, but in that case still indicate that the
2973                  * device is actually down.
2974                  */
2975                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2976                         flags &= ~BIT(HCI_UP);
2977 
2978                 (dr + n)->dev_id  = hdev->id;
2979                 (dr + n)->dev_opt = flags;
2980 
2981                 if (++n >= dev_num)
2982                         break;
2983         }
2984         read_unlock(&hci_dev_list_lock);
2985 
2986         dl->dev_num = n;
2987         size = sizeof(*dl) + n * sizeof(*dr);
2988 
2989         err = copy_to_user(arg, dl, size);
2990         kfree(dl);
2991 
2992         return err ? -EFAULT : 0;
2993 }
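
/* Illustrative sketch (added commentary, not part of the original file):
 * userspace enumerates controllers through HCIGETDEVLIST by passing a
 * buffer that starts with the requested dev_num, mirroring the layout
 * handled above (ctl is an open raw HCI control socket):
 */
#if 0 /* userspace example only */
	struct hci_dev_list_req *dl;
	int i;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (dl) {
		dl->dev_num = HCI_MAX_DEV;
		if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
			for (i = 0; i < dl->dev_num; i++)
				printf("hci%u flags 0x%lx\n",
				       dl->dev_req[i].dev_id,
				       (unsigned long) dl->dev_req[i].dev_opt);
		free(dl);
	}
#endif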
2994 
2995 int hci_get_dev_info(void __user *arg)
2996 {
2997         struct hci_dev *hdev;
2998         struct hci_dev_info di;
2999         unsigned long flags;
3000         int err = 0;
3001 
3002         if (copy_from_user(&di, arg, sizeof(di)))
3003                 return -EFAULT;
3004 
3005         hdev = hci_dev_get(di.dev_id);
3006         if (!hdev)
3007                 return -ENODEV;
3008 
3009         /* When the auto-off is configured, it means the transport
3010          * is running, but in that case still indicate that the
3011          * device is actually down.
3012          */
3013         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3014                 flags = hdev->flags & ~BIT(HCI_UP);
3015         else
3016                 flags = hdev->flags;
3017 
3018         strcpy(di.name, hdev->name);
3019         di.bdaddr   = hdev->bdaddr;
3020         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3021         di.flags    = flags;
3022         di.pkt_type = hdev->pkt_type;
3023         if (lmp_bredr_capable(hdev)) {
3024                 di.acl_mtu  = hdev->acl_mtu;
3025                 di.acl_pkts = hdev->acl_pkts;
3026                 di.sco_mtu  = hdev->sco_mtu;
3027                 di.sco_pkts = hdev->sco_pkts;
3028         } else {
3029                 di.acl_mtu  = hdev->le_mtu;
3030                 di.acl_pkts = hdev->le_pkts;
3031                 di.sco_mtu  = 0;
3032                 di.sco_pkts = 0;
3033         }
3034         di.link_policy = hdev->link_policy;
3035         di.link_mode   = hdev->link_mode;
3036 
3037         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3038         memcpy(&di.features, &hdev->features, sizeof(di.features));
3039 
3040         if (copy_to_user(arg, &di, sizeof(di)))
3041                 err = -EFAULT;
3042 
3043         hci_dev_put(hdev);
3044 
3045         return err;
3046 }
3047 
3048 /* ---- Interface to HCI drivers ---- */
3049 
3050 static int hci_rfkill_set_block(void *data, bool blocked)
3051 {
3052         struct hci_dev *hdev = data;
3053 
3054         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3055 
3056         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3057                 return -EBUSY;
3058 
3059         if (blocked) {
3060                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3061                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3062                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3063                         hci_dev_do_close(hdev);
3064         } else {
3065                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3066         }
3067 
3068         return 0;
3069 }
3070 
3071 static const struct rfkill_ops hci_rfkill_ops = {
3072         .set_block = hci_rfkill_set_block,
3073 };
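
/* Illustrative sketch (added commentary, not part of the original file):
 * these ops are wired up when the controller is registered (in
 * hci_register_dev(), outside this excerpt) using the standard rfkill
 * API, roughly:
 */
#if 0 /* example only, mirroring the usual registration pattern */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
		rfkill_destroy(hdev->rfkill);
		hdev->rfkill = NULL;
	}
#endif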
3074 
3075 static void hci_power_on(struct work_struct *work)
3076 {
3077         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3078         int err;
3079 
3080         BT_DBG("%s", hdev->name);
3081 
3082         err = hci_dev_do_open(hdev);
3083         if (err < 0) {
3084                 hci_dev_lock(hdev);
3085                 mgmt_set_powered_failed(hdev, err);
3086                 hci_dev_unlock(hdev);
3087                 return;
3088         }
3089 
3090         /* During the HCI setup phase, a few error conditions are
3091          * ignored and they need to be checked now. If they are still
3092          * valid, it is important to turn the device back off.
3093          */
3094         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3095             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3096             (hdev->dev_type == HCI_BREDR &&
3097              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3098              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3099                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3100                 hci_dev_do_close(hdev);
3101         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3102                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3103                                    HCI_AUTO_OFF_TIMEOUT);
3104         }
3105 
3106         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3107                 /* For unconfigured devices, set the HCI_RAW flag
3108                  * so that userspace can easily identify them.
3109                  */
3110                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3111                         set_bit(HCI_RAW, &hdev->flags);
3112 
3113                 /* For fully configured devices, this will send
3114                  * the Index Added event. For unconfigured devices,
3115                  * it will send the Unconfigured Index Added event.
3116                  *
3117                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3118          * and no event will be sent.
3119                  */
3120                 mgmt_index_added(hdev);
3121         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3122                 /* Now that the controller is configured, it is
3123                  * important to clear the HCI_RAW flag.
3124                  */
3125                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3126                         clear_bit(HCI_RAW, &hdev->flags);
3127 
3128                 /* Powering on the controller with HCI_CONFIG set only
3129                  * happens with the transition from unconfigured to
3130                  * configured. This will send the Index Added event.
3131                  */
3132                 mgmt_index_added(hdev);
3133         }
3134 }
3135 
3136 static void hci_power_off(struct work_struct *work)
3137 {
3138         struct hci_dev *hdev = container_of(work, struct hci_dev,
3139                                             power_off.work);
3140 
3141         BT_DBG("%s", hdev->name);
3142 
3143         hci_dev_do_close(hdev);
3144 }
3145 
3146 static void hci_discov_off(struct work_struct *work)
3147 {
3148         struct hci_dev *hdev;
3149 
3150         hdev = container_of(work, struct hci_dev, discov_off.work);
3151 
3152         BT_DBG("%s", hdev->name);
3153 
3154         mgmt_discoverable_timeout(hdev);
3155 }
3156 
3157 void hci_uuids_clear(struct hci_dev *hdev)
3158 {
3159         struct bt_uuid *uuid, *tmp;
3160 
3161         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3162                 list_del(&uuid->list);
3163                 kfree(uuid);
3164         }
3165 }
3166 
3167 void hci_link_keys_clear(struct hci_dev *hdev)
3168 {
3169         struct link_key *key;
3170 
3171         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3172                 list_del_rcu(&key->list);
3173                 kfree_rcu(key, rcu);
3174         }
3175 }
3176 
3177 void hci_smp_ltks_clear(struct hci_dev *hdev)
3178 {
3179         struct smp_ltk *k;
3180 
3181         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3182                 list_del_rcu(&k->list);
3183                 kfree_rcu(k, rcu);
3184         }
3185 }
3186 
3187 void hci_smp_irks_clear(struct hci_dev *hdev)
3188 {
3189         struct smp_irk *k;
3190 
3191         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3192                 list_del_rcu(&k->list);
3193                 kfree_rcu(k, rcu);
3194         }
3195 }
3196 
3197 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3198 {
3199         struct link_key *k;
3200 
3201         rcu_read_lock();
3202         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3203                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3204                         rcu_read_unlock();
3205                         return k;
3206                 }
3207         }
3208         rcu_read_unlock();
3209 
3210         return NULL;
3211 }
3212 
3213 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3214                                u8 key_type, u8 old_key_type)
3215 {
3216         /* Legacy key */
3217         if (key_type < 0x03)
3218                 return true;
3219 
3220         /* Debug keys are insecure so don't store them persistently */
3221         if (key_type == HCI_LK_DEBUG_COMBINATION)
3222                 return false;
3223 
3224         /* Changed combination key and there's no previous one */
3225         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3226                 return false;
3227 
3228         /* Security mode 3 case */
3229         if (!conn)
3230                 return true;
3231 
3232         /* BR/EDR key derived using SC from an LE link */
3233         if (conn->type == LE_LINK)
3234                 return true;
3235 
3236         /* Neither local nor remote side had no-bonding as a requirement */
3237         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3238                 return true;
3239 
3240         /* Local side had dedicated bonding as a requirement */
3241         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3242                 return true;
3243 
3244         /* Remote side had dedicated bonding as a requirement */
3245         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3246                 return true;
3247 
3248         /* If none of the above criteria match, then don't store the key
3249          * persistently */
3250         return false;
3251 }
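
/* Editorial note: the magic numbers above come from the HCI IO
 * capability exchange. auth_type values are 0x00/0x01 for No Bonding
 * (without/with MITM protection), 0x02/0x03 for Dedicated Bonding and
 * 0x04/0x05 for General Bonding; 0xff is used throughout this file as
 * a sentinel meaning "unknown". Hence "> 0x01" reads as "some form of
 * bonding was requested".
 */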
3252 
3253 static u8 ltk_role(u8 type)
3254 {
3255         if (type == SMP_LTK)
3256                 return HCI_ROLE_MASTER;
3257 
3258         return HCI_ROLE_SLAVE;
3259 }
3260 
3261 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262                              u8 addr_type, u8 role)
3263 {
3264         struct smp_ltk *k;
3265 
3266         rcu_read_lock();
3267         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3268                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3269                         continue;
3270 
3271                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
3272                         rcu_read_unlock();
3273                         return k;
3274                 }
3275         }
3276         rcu_read_unlock();
3277 
3278         return NULL;
3279 }
3280 
3281 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3282 {
3283         struct smp_irk *irk;
3284 
3285         rcu_read_lock();
3286         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3287                 if (!bacmp(&irk->rpa, rpa)) {
3288                         rcu_read_unlock();
3289                         return irk;
3290                 }
3291         }
3292 
3293         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3294                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3295                         bacpy(&irk->rpa, rpa);
3296                         rcu_read_unlock();
3297                         return irk;
3298                 }
3299         }
3300         rcu_read_unlock();
3301 
3302         return NULL;
3303 }
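
/* Editorial note: the lookup above is deliberately two-pass. The first
 * loop is a cheap byte compare against the last RPA each IRK resolved,
 * and only on a miss does the second loop run the AES-based ah()
 * resolution in smp_irk_matches(), caching a hit in irk->rpa so the
 * next lookup for the same RPA takes the fast path.
 */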
3304 
3305 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3306                                      u8 addr_type)
3307 {
3308         struct smp_irk *irk;
3309 
3310         /* Identity Address must be public or static random */
3311         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3312                 return NULL;
3313 
3314         rcu_read_lock();
3315         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3316                 if (addr_type == irk->addr_type &&
3317                     bacmp(bdaddr, &irk->bdaddr) == 0) {
3318                         rcu_read_unlock();
3319                         return irk;
3320                 }
3321         }
3322         rcu_read_unlock();
3323 
3324         return NULL;
3325 }
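
/* Worked example (editorial): bdaddr_t is stored little-endian, so
 * b[5] is the most significant byte of the printed address. A static
 * random address must have its top two bits set (0b11xxxxxx), so for
 * instance C4:.. or F0:.. passes the check above, while 7B:..
 * (0x7b & 0xc0 == 0x40, the resolvable-private pattern) is rejected.
 */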
3326 
3327 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3328                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3329                                   u8 pin_len, bool *persistent)
3330 {
3331         struct link_key *key, *old_key;
3332         u8 old_key_type;
3333 
3334         old_key = hci_find_link_key(hdev, bdaddr);
3335         if (old_key) {
3336                 old_key_type = old_key->type;
3337                 key = old_key;
3338         } else {
3339                 old_key_type = conn ? conn->key_type : 0xff;
3340                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3341                 if (!key)
3342                         return NULL;
3343                 list_add_rcu(&key->list, &hdev->link_keys);
3344         }
3345 
3346         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3347 
3348         /* Some buggy controller combinations generate a changed
3349          * combination key for legacy pairing even when there's no
3350          * previous key */
3351         if (type == HCI_LK_CHANGED_COMBINATION &&
3352             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3353                 type = HCI_LK_COMBINATION;
3354                 if (conn)
3355                         conn->key_type = type;
3356         }
3357 
3358         bacpy(&key->bdaddr, bdaddr);
3359         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3360         key->pin_len = pin_len;
3361 
3362         if (type == HCI_LK_CHANGED_COMBINATION)
3363                 key->type = old_key_type;
3364         else
3365                 key->type = type;
3366 
3367         if (persistent)
3368                 *persistent = hci_persistent_key(hdev, conn, type,
3369                                                  old_key_type);
3370 
3371         return key;
3372 }
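
/* Editorial example: the typical caller is the Link Key Notification
 * event handler in hci_event.c, which does roughly the following and
 * then forwards persistent keys to userspace:
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */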
3373 
3374 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3375                             u8 addr_type, u8 type, u8 authenticated,
3376                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3377 {
3378         struct smp_ltk *key, *old_key;
3379         u8 role = ltk_role(type);
3380 
3381         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3382         if (old_key)
3383                 key = old_key;
3384         else {
3385                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3386                 if (!key)
3387                         return NULL;
3388                 list_add_rcu(&key->list, &hdev->long_term_keys);
3389         }
3390 
3391         bacpy(&key->bdaddr, bdaddr);
3392         key->bdaddr_type = addr_type;
3393         memcpy(key->val, tk, sizeof(key->val));
3394         key->authenticated = authenticated;
3395         key->ediv = ediv;
3396         key->rand = rand;
3397         key->enc_size = enc_size;
3398         key->type = type;
3399 
3400         return key;
3401 }
3402 
3403 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3404                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3405 {
3406         struct smp_irk *irk;
3407 
3408         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3409         if (!irk) {
3410                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3411                 if (!irk)
3412                         return NULL;
3413 
3414                 bacpy(&irk->bdaddr, bdaddr);
3415                 irk->addr_type = addr_type;
3416 
3417                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3418         }
3419 
3420         memcpy(irk->val, val, 16);
3421         bacpy(&irk->rpa, rpa);
3422 
3423         return irk;
3424 }
3425 
3426 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3427 {
3428         struct link_key *key;
3429 
3430         key = hci_find_link_key(hdev, bdaddr);
3431         if (!key)
3432                 return -ENOENT;
3433 
3434         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3435 
3436         list_del_rcu(&key->list);
3437         kfree_rcu(key, rcu);
3438 
3439         return 0;
3440 }
3441 
3442 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3443 {
3444         struct smp_ltk *k;
3445         int removed = 0;
3446 
3447         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3448                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3449                         continue;
3450 
3451                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3452 
3453                 list_del_rcu(&k->list);
3454                 kfree_rcu(k, rcu);
3455                 removed++;
3456         }
3457 
3458         return removed ? 0 : -ENOENT;
3459 }
3460 
3461 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3462 {
3463         struct smp_irk *k;
3464 
3465         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3466                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3467                         continue;
3468 
3469                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3470 
3471                 list_del_rcu(&k->list);
3472                 kfree_rcu(k, rcu);
3473         }
3474 }
3475 
3476 /* HCI command timer function */
3477 static void hci_cmd_timeout(struct work_struct *work)
3478 {
3479         struct hci_dev *hdev = container_of(work, struct hci_dev,
3480                                             cmd_timer.work);
3481 
3482         if (hdev->sent_cmd) {
3483                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3484                 u16 opcode = __le16_to_cpu(sent->opcode);
3485 
3486                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3487         } else {
3488                 BT_ERR("%s command tx timeout", hdev->name);
3489         }
3490 
3491         atomic_set(&hdev->cmd_cnt, 1);
3492         queue_work(hdev->workqueue, &hdev->cmd_work);
3493 }
3494 
3495 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3496                                           bdaddr_t *bdaddr, u8 bdaddr_type)
3497 {
3498         struct oob_data *data;
3499 
3500         list_for_each_entry(data, &hdev->remote_oob_data, list) {
3501                 if (bacmp(bdaddr, &data->bdaddr) != 0)
3502                         continue;
3503                 if (data->bdaddr_type != bdaddr_type)
3504                         continue;
3505                 return data;
3506         }
3507 
3508         return NULL;
3509 }
3510 
3511 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3512                                u8 bdaddr_type)
3513 {
3514         struct oob_data *data;
3515 
3516         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3517         if (!data)
3518                 return -ENOENT;
3519 
3520         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3521 
3522         list_del(&data->list);
3523         kfree(data);
3524 
3525         return 0;
3526 }
3527 
3528 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3529 {
3530         struct oob_data *data, *n;
3531 
3532         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3533                 list_del(&data->list);
3534                 kfree(data);
3535         }
3536 }
3537 
3538 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3539                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
3540                             u8 *hash256, u8 *rand256)
3541 {
3542         struct oob_data *data;
3543 
3544         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3545         if (!data) {
3546                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3547                 if (!data)
3548                         return -ENOMEM;
3549 
3550                 bacpy(&data->bdaddr, bdaddr);
3551                 data->bdaddr_type = bdaddr_type;
3552                 list_add(&data->list, &hdev->remote_oob_data);
3553         }
3554 
3555         if (hash192 && rand192) {
3556                 memcpy(data->hash192, hash192, sizeof(data->hash192));
3557                 memcpy(data->rand192, rand192, sizeof(data->rand192));
3558         } else {
3559                 memset(data->hash192, 0, sizeof(data->hash192));
3560                 memset(data->rand192, 0, sizeof(data->rand192));
3561         }
3562 
3563         if (hash256 && rand256) {
3564                 memcpy(data->hash256, hash256, sizeof(data->hash256));
3565                 memcpy(data->rand256, rand256, sizeof(data->rand256));
3566         } else {
3567                 memset(data->hash256, 0, sizeof(data->hash256));
3568                 memset(data->rand256, 0, sizeof(data->rand256));
3569         }
3570 
3571         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3572 
3573         return 0;
3574 }
3575 
3576 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3577                                          bdaddr_t *bdaddr, u8 type)
3578 {
3579         struct bdaddr_list *b;
3580 
3581         list_for_each_entry(b, bdaddr_list, list) {
3582                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3583                         return b;
3584         }
3585 
3586         return NULL;
3587 }
3588 
3589 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3590 {
3591         struct list_head *p, *n;
3592 
3593         list_for_each_safe(p, n, bdaddr_list) {
3594                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3595 
3596                 list_del(p);
3597                 kfree(b);
3598         }
3599 }
3600 
3601 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3602 {
3603         struct bdaddr_list *entry;
3604 
3605         if (!bacmp(bdaddr, BDADDR_ANY))
3606                 return -EBADF;
3607 
3608         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3609                 return -EEXIST;
3610 
3611         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3612         if (!entry)
3613                 return -ENOMEM;
3614 
3615         bacpy(&entry->bdaddr, bdaddr);
3616         entry->bdaddr_type = type;
3617 
3618         list_add(&entry->list, list);
3619 
3620         return 0;
3621 }
3622 
3623 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3624 {
3625         struct bdaddr_list *entry;
3626 
3627         if (!bacmp(bdaddr, BDADDR_ANY)) {
3628                 hci_bdaddr_list_clear(list);
3629                 return 0;
3630         }
3631 
3632         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3633         if (!entry)
3634                 return -ENOENT;
3635 
3636         list_del(&entry->list);
3637         kfree(entry);
3638 
3639         return 0;
3640 }
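
/* Editorial example: these helpers back hdev->blacklist,
 * hdev->whitelist and hdev->le_white_list alike. A mgmt handler adding
 * a device would do roughly (cp being hypothetical command parameters):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
 *				  cp->addr.type);
 *	hci_dev_unlock(hdev);
 */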
3641 
3642 /* This function requires the caller holds hdev->lock */
3643 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3644                                                bdaddr_t *addr, u8 addr_type)
3645 {
3646         struct hci_conn_params *params;
3647 
3648         /* The conn params list only contains identity addresses */
3649         if (!hci_is_identity_address(addr, addr_type))
3650                 return NULL;
3651 
3652         list_for_each_entry(params, &hdev->le_conn_params, list) {
3653                 if (bacmp(&params->addr, addr) == 0 &&
3654                     params->addr_type == addr_type) {
3655                         return params;
3656                 }
3657         }
3658 
3659         return NULL;
3660 }
3661 
3662 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3663 {
3664         struct hci_conn *conn;
3665 
3666         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3667         if (!conn)
3668                 return false;
3669 
3670         if (conn->dst_type != type)
3671                 return false;
3672 
3673         if (conn->state != BT_CONNECTED)
3674                 return false;
3675 
3676         return true;
3677 }
3678 
3679 /* This function requires the caller holds hdev->lock */
3680 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3681                                                   bdaddr_t *addr, u8 addr_type)
3682 {
3683         struct hci_conn_params *param;
3684 
3685         /* The list only contains identity addresses */
3686         if (!hci_is_identity_address(addr, addr_type))
3687                 return NULL;
3688 
3689         list_for_each_entry(param, list, action) {
3690                 if (bacmp(&param->addr, addr) == 0 &&
3691                     param->addr_type == addr_type)
3692                         return param;
3693         }
3694 
3695         return NULL;
3696 }
3697 
3698 /* This function requires the caller holds hdev->lock */
3699 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3700                                             bdaddr_t *addr, u8 addr_type)
3701 {
3702         struct hci_conn_params *params;
3703 
3704         if (!hci_is_identity_address(addr, addr_type))
3705                 return NULL;
3706 
3707         params = hci_conn_params_lookup(hdev, addr, addr_type);
3708         if (params)
3709                 return params;
3710 
3711         params = kzalloc(sizeof(*params), GFP_KERNEL);
3712         if (!params) {
3713                 BT_ERR("Out of memory");
3714                 return NULL;
3715         }
3716 
3717         bacpy(&params->addr, addr);
3718         params->addr_type = addr_type;
3719 
3720         list_add(&params->list, &hdev->le_conn_params);
3721         INIT_LIST_HEAD(&params->action);
3722 
3723         params->conn_min_interval = hdev->le_conn_min_interval;
3724         params->conn_max_interval = hdev->le_conn_max_interval;
3725         params->conn_latency = hdev->le_conn_latency;
3726         params->supervision_timeout = hdev->le_supv_timeout;
3727         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3728 
3729         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3730 
3731         return params;
3732 }
3733 
3734 /* This function requires the caller holds hdev->lock */
3735 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3736                         u8 auto_connect)
3737 {
3738         struct hci_conn_params *params;
3739 
3740         params = hci_conn_params_add(hdev, addr, addr_type);
3741         if (!params)
3742                 return -EIO;
3743 
3744         if (params->auto_connect == auto_connect)
3745                 return 0;
3746 
3747         list_del_init(&params->action);
3748 
3749         switch (auto_connect) {
3750         case HCI_AUTO_CONN_DISABLED:
3751         case HCI_AUTO_CONN_LINK_LOSS:
3752                 hci_update_background_scan(hdev);
3753                 break;
3754         case HCI_AUTO_CONN_REPORT:
3755                 list_add(&params->action, &hdev->pend_le_reports);
3756                 hci_update_background_scan(hdev);
3757                 break;
3758         case HCI_AUTO_CONN_DIRECT:
3759         case HCI_AUTO_CONN_ALWAYS:
3760                 if (!is_connected(hdev, addr, addr_type)) {
3761                         list_add(&params->action, &hdev->pend_le_conns);
3762                         hci_update_background_scan(hdev);
3763                 }
3764                 break;
3765         }
3766 
3767         params->auto_connect = auto_connect;
3768 
3769         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3770                auto_connect);
3771 
3772         return 0;
3773 }
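
/* Editorial example: the mgmt Add Device command maps its action
 * parameter onto these auto_connect values, e.g. for an always
 * auto-connectable LE device (addr/addr_type as supplied by userspace):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, addr_type,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */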
3774 
3775 static void hci_conn_params_free(struct hci_conn_params *params)
3776 {
3777         if (params->conn) {
3778                 hci_conn_drop(params->conn);
3779                 hci_conn_put(params->conn);
3780         }
3781 
3782         list_del(&params->action);
3783         list_del(&params->list);
3784         kfree(params);
3785 }
3786 
3787 /* This function requires the caller holds hdev->lock */
3788 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3789 {
3790         struct hci_conn_params *params;
3791 
3792         params = hci_conn_params_lookup(hdev, addr, addr_type);
3793         if (!params)
3794                 return;
3795 
3796         hci_conn_params_free(params);
3797 
3798         hci_update_background_scan(hdev);
3799 
3800         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3801 }
3802 
3803 /* This function requires the caller holds hdev->lock */
3804 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3805 {
3806         struct hci_conn_params *params, *tmp;
3807 
3808         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3809                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3810                         continue;
3811                 list_del(&params->list);
3812                 kfree(params);
3813         }
3814 
3815         BT_DBG("All LE disabled connection parameters were removed");
3816 }
3817 
3818 /* This function requires the caller holds hdev->lock */
3819 void hci_conn_params_clear_all(struct hci_dev *hdev)
3820 {
3821         struct hci_conn_params *params, *tmp;
3822 
3823         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3824                 hci_conn_params_free(params);
3825 
3826         hci_update_background_scan(hdev);
3827 
3828         BT_DBG("All LE connection parameters were removed");
3829 }
3830 
3831 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3832 {
3833         if (status) {
3834                 BT_ERR("Failed to start inquiry: status %d", status);
3835 
3836                 hci_dev_lock(hdev);
3837                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838                 hci_dev_unlock(hdev);
3839                 return;
3840         }
3841 }
3842 
3843 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3844 {
3845         /* General inquiry access code (GIAC) */
3846         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3847         struct hci_request req;
3848         struct hci_cp_inquiry cp;
3849         int err;
3850 
3851         if (status) {
3852                 BT_ERR("Failed to disable LE scanning: status %d", status);
3853                 return;
3854         }
3855 
3856         switch (hdev->discovery.type) {
3857         case DISCOV_TYPE_LE:
3858                 hci_dev_lock(hdev);
3859                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3860                 hci_dev_unlock(hdev);
3861                 break;
3862 
3863         case DISCOV_TYPE_INTERLEAVED:
3864                 hci_req_init(&req, hdev);
3865 
3866                 memset(&cp, 0, sizeof(cp));
3867                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3868                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3869                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3870 
3871                 hci_dev_lock(hdev);
3872 
3873                 hci_inquiry_cache_flush(hdev);
3874 
3875                 err = hci_req_run(&req, inquiry_complete);
3876                 if (err) {
3877                         BT_ERR("Inquiry request failed: err %d", err);
3878                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3879                 }
3880 
3881                 hci_dev_unlock(hdev);
3882                 break;
3883         }
3884 }
3885 
3886 static void le_scan_disable_work(struct work_struct *work)
3887 {
3888         struct hci_dev *hdev = container_of(work, struct hci_dev,
3889                                             le_scan_disable.work);
3890         struct hci_request req;
3891         int err;
3892 
3893         BT_DBG("%s", hdev->name);
3894 
3895         hci_req_init(&req, hdev);
3896 
3897         hci_req_add_le_scan_disable(&req);
3898 
3899         err = hci_req_run(&req, le_scan_disable_work_complete);
3900         if (err)
3901                 BT_ERR("Disable LE scanning request failed: err %d", err);
3902 }
3903 
3904 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3905 {
3906         struct hci_dev *hdev = req->hdev;
3907 
3908         /* If we're advertising or initiating an LE connection we can't
3909          * go ahead and change the random address at this time. This is
3910          * because the eventual initiator address used for the
3911          * subsequently created connection will be undefined (some
3912          * controllers use the new address and others the one we had
3913          * when the operation started).
3914          *
3915          * In this kind of scenario, skip the update and let the random
3916          * address be updated at the next cycle.
3917          */
3918         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3919             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3920                 BT_DBG("Deferring random address update");
3921                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3922                 return;
3923         }
3924 
3925         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3926 }
3927 
3928 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3929                               u8 *own_addr_type)
3930 {
3931         struct hci_dev *hdev = req->hdev;
3932         int err;
3933 
3934         /* If privacy is enabled, use a resolvable private address. If
3935          * the current RPA has expired or something other than the
3936          * current RPA is in use, generate a new one.
3937          */
3938         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3939                 int to;
3940 
3941                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942 
3943                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3944                     !bacmp(&hdev->random_addr, &hdev->rpa))
3945                         return 0;
3946 
3947                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3948                 if (err < 0) {
3949                         BT_ERR("%s failed to generate new RPA", hdev->name);
3950                         return err;
3951                 }
3952 
3953                 set_random_addr(req, &hdev->rpa);
3954 
3955                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3956                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3957 
3958                 return 0;
3959         }
3960 
3961         /* In case privacy is required but no resolvable private address
3962          * is available, use a non-resolvable private address. This is
3963          * useful for active scanning and non-connectable advertising.
3964          */
3965         if (require_privacy) {
3966                 bdaddr_t nrpa;
3967 
3968                 while (true) {
3969                         /* The non-resolvable private address is generated
3970                          * from six random bytes with the two most significant
3971                          * bits cleared.
3972                          */
3973                         get_random_bytes(&nrpa, 6);
3974                         nrpa.b[5] &= 0x3f;
3975 
3976                         /* The non-resolvable private address shall not be
3977                          * equal to the public address.
3978                          */
3979                         if (bacmp(&hdev->bdaddr, &nrpa))
3980                                 break;
3981                 }
3982 
3983                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3984                 set_random_addr(req, &nrpa);
3985                 return 0;
3986         }
3987 
3988         /* If forcing the static address is in use or there is no public
3989          * address, use the static address as the random address (but skip
3990          * the HCI command if the current random address is already the
3991          * static one).
3992          */
3993         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3994             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3995                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3996                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3997                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3998                                     &hdev->static_addr);
3999                 return 0;
4000         }
4001 
4002         /* Neither privacy nor a static address is in use, so use a
4003          * public address.
4004          */
4005         *own_addr_type = ADDR_LE_DEV_PUBLIC;
4006 
4007         return 0;
4008 }
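
/* Worked example (editorial): for the non-resolvable private address
 * above, nrpa.b[5] &= 0x3f clears the two most significant bits of the
 * most significant byte (bdaddr_t is little-endian, so b[5] is the
 * MSB). A random byte of 0xf3 becomes 0x33, giving a 00..3F-prefixed
 * address that can never collide with a static random address (top
 * bits 11) or a resolvable private address (top bits 01).
 */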
4009 
4010 /* Copy the Identity Address of the controller.
4011  *
4012  * If the controller has a public BD_ADDR, then by default use that one.
4013  * If this is an LE-only controller without a public address, default to
4014  * the static random address.
4015  *
4016  * For debugging purposes it is possible to force controllers with a
4017  * public address to use the static random address instead.
4018  */
4019 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4020                                u8 *bdaddr_type)
4021 {
4022         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
4023             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4024                 bacpy(bdaddr, &hdev->static_addr);
4025                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4026         } else {
4027                 bacpy(bdaddr, &hdev->bdaddr);
4028                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4029         }
4030 }
4031 
4032 /* Alloc HCI device */
4033 struct hci_dev *hci_alloc_dev(void)
4034 {
4035         struct hci_dev *hdev;
4036 
4037         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
4038         if (!hdev)
4039                 return NULL;
4040 
4041         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4042         hdev->esco_type = (ESCO_HV1);
4043         hdev->link_mode = (HCI_LM_ACCEPT);
4044         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
4045         hdev->io_capability = 0x03;     /* No Input No Output */
4046         hdev->manufacturer = 0xffff;    /* Default to internal use */
4047         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4048         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4049 
4050         hdev->sniff_max_interval = 800;
4051         hdev->sniff_min_interval = 80;
4052 
4053         hdev->le_adv_channel_map = 0x07;
4054         hdev->le_adv_min_interval = 0x0800;
4055         hdev->le_adv_max_interval = 0x0800;
4056         hdev->le_scan_interval = 0x0060;
4057         hdev->le_scan_window = 0x0030;
4058         hdev->le_conn_min_interval = 0x0028;
4059         hdev->le_conn_max_interval = 0x0038;
4060         hdev->le_conn_latency = 0x0000;
4061         hdev->le_supv_timeout = 0x002a;
4062 
4063         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4064         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4065         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4066         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4067 
4068         mutex_init(&hdev->lock);
4069         mutex_init(&hdev->req_lock);
4070 
4071         INIT_LIST_HEAD(&hdev->mgmt_pending);
4072         INIT_LIST_HEAD(&hdev->blacklist);
4073         INIT_LIST_HEAD(&hdev->whitelist);
4074         INIT_LIST_HEAD(&hdev->uuids);
4075         INIT_LIST_HEAD(&hdev->link_keys);
4076         INIT_LIST_HEAD(&hdev->long_term_keys);
4077         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4078         INIT_LIST_HEAD(&hdev->remote_oob_data);
4079         INIT_LIST_HEAD(&hdev->le_white_list);
4080         INIT_LIST_HEAD(&hdev->le_conn_params);
4081         INIT_LIST_HEAD(&hdev->pend_le_conns);
4082         INIT_LIST_HEAD(&hdev->pend_le_reports);
4083         INIT_LIST_HEAD(&hdev->conn_hash.list);
4084 
4085         INIT_WORK(&hdev->rx_work, hci_rx_work);
4086         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4087         INIT_WORK(&hdev->tx_work, hci_tx_work);
4088         INIT_WORK(&hdev->power_on, hci_power_on);
4089 
4090         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4091         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4092         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4093 
4094         skb_queue_head_init(&hdev->rx_q);
4095         skb_queue_head_init(&hdev->cmd_q);
4096         skb_queue_head_init(&hdev->raw_q);
4097 
4098         init_waitqueue_head(&hdev->req_wait_q);
4099 
4100         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4101 
4102         hci_init_sysfs(hdev);
4103         discovery_init(hdev);
4104 
4105         return hdev;
4106 }
4107 EXPORT_SYMBOL(hci_alloc_dev);
4108 
4109 /* Free HCI device */
4110 void hci_free_dev(struct hci_dev *hdev)
4111 {
4112         /* will free via device release */
4113         put_device(&hdev->dev);
4114 }
4115 EXPORT_SYMBOL(hci_free_dev);
4116 
4117 /* Register HCI device */
4118 int hci_register_dev(struct hci_dev *hdev)
4119 {
4120         int id, error;
4121 
4122         if (!hdev->open || !hdev->close || !hdev->send)
4123                 return -EINVAL;
4124 
4125         /* Do not allow HCI_AMP devices to register at index 0,
4126          * so the index can be used as the AMP controller ID.
4127          */
4128         switch (hdev->dev_type) {
4129         case HCI_BREDR:
4130                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4131                 break;
4132         case HCI_AMP:
4133                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4134                 break;
4135         default:
4136                 return -EINVAL;
4137         }
4138 
4139         if (id < 0)
4140                 return id;
4141 
4142         sprintf(hdev->name, "hci%d", id);
4143         hdev->id = id;
4144 
4145         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4146 
4147         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4148                                           WQ_MEM_RECLAIM, 1, hdev->name);
4149         if (!hdev->workqueue) {
4150                 error = -ENOMEM;
4151                 goto err;
4152         }
4153 
4154         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4155                                               WQ_MEM_RECLAIM, 1, hdev->name);
4156         if (!hdev->req_workqueue) {
4157                 destroy_workqueue(hdev->workqueue);
4158                 error = -ENOMEM;
4159                 goto err;
4160         }
4161 
4162         if (!IS_ERR_OR_NULL(bt_debugfs))
4163                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4164 
4165         dev_set_name(&hdev->dev, "%s", hdev->name);
4166 
4167         error = device_add(&hdev->dev);
4168         if (error < 0)
4169                 goto err_wqueue;
4170 
4171         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4172                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4173                                     hdev);
4174         if (hdev->rfkill) {
4175                 if (rfkill_register(hdev->rfkill) < 0) {
4176                         rfkill_destroy(hdev->rfkill);
4177                         hdev->rfkill = NULL;
4178                 }
4179         }
4180 
4181         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4182                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4183 
4184         set_bit(HCI_SETUP, &hdev->dev_flags);
4185         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4186 
4187         if (hdev->dev_type == HCI_BREDR) {
4188                 /* Assume BR/EDR support until proven otherwise (such as
4189                  * through reading the supported features during init).
4190                  */
4191                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4192         }
4193 
4194         write_lock(&hci_dev_list_lock);
4195         list_add(&hdev->list, &hci_dev_list);
4196         write_unlock(&hci_dev_list_lock);
4197 
4198         /* Devices that are marked for raw-only usage are unconfigured
4199          * and should not be included in normal operation.
4200          */
4201         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4202                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4203 
4204         hci_notify(hdev, HCI_DEV_REG);
4205         hci_dev_hold(hdev);
4206 
4207         queue_work(hdev->req_workqueue, &hdev->power_on);
4208 
4209         return id;
4210 
4211 err_wqueue:
4212         destroy_workqueue(hdev->workqueue);
4213         destroy_workqueue(hdev->req_workqueue);
4214 err:
4215         ida_simple_remove(&hci_index_ida, hdev->id);
4216 
4217         return error;
4218 }
4219 EXPORT_SYMBOL(hci_register_dev);
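
/* Editorial example: a minimal transport driver allocates a device,
 * wires up the three mandatory callbacks checked above and registers
 * it; every my_* name here is hypothetical:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */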
4220 
4221 /* Unregister HCI device */
4222 void hci_unregister_dev(struct hci_dev *hdev)
4223 {
4224         int i, id;
4225 
4226         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4227 
4228         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4229 
4230         id = hdev->id;
4231 
4232         write_lock(&hci_dev_list_lock);
4233         list_del(&hdev->list);
4234         write_unlock(&hci_dev_list_lock);
4235 
4236         hci_dev_do_close(hdev);
4237 
4238         for (i = 0; i < NUM_REASSEMBLY; i++)
4239                 kfree_skb(hdev->reassembly[i]);
4240 
4241         cancel_work_sync(&hdev->power_on);
4242 
4243         if (!test_bit(HCI_INIT, &hdev->flags) &&
4244             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4245             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4246                 hci_dev_lock(hdev);
4247                 mgmt_index_removed(hdev);
4248                 hci_dev_unlock(hdev);
4249         }
4250 
4251         /* mgmt_index_removed should take care of emptying the
4252          * pending list */
4253         BUG_ON(!list_empty(&hdev->mgmt_pending));
4254 
4255         hci_notify(hdev, HCI_DEV_UNREG);
4256 
4257         if (hdev->rfkill) {
4258                 rfkill_unregister(hdev->rfkill);
4259                 rfkill_destroy(hdev->rfkill);
4260         }
4261 
4262         smp_unregister(hdev);
4263 
4264         device_del(&hdev->dev);
4265 
4266         debugfs_remove_recursive(hdev->debugfs);
4267 
4268         destroy_workqueue(hdev->workqueue);
4269         destroy_workqueue(hdev->req_workqueue);
4270 
4271         hci_dev_lock(hdev);
4272         hci_bdaddr_list_clear(&hdev->blacklist);
4273         hci_bdaddr_list_clear(&hdev->whitelist);
4274         hci_uuids_clear(hdev);
4275         hci_link_keys_clear(hdev);
4276         hci_smp_ltks_clear(hdev);
4277         hci_smp_irks_clear(hdev);
4278         hci_remote_oob_data_clear(hdev);
4279         hci_bdaddr_list_clear(&hdev->le_white_list);
4280         hci_conn_params_clear_all(hdev);
4281         hci_discovery_filter_clear(hdev);
4282         hci_dev_unlock(hdev);
4283 
4284         hci_dev_put(hdev);
4285 
4286         ida_simple_remove(&hci_index_ida, id);
4287 }
4288 EXPORT_SYMBOL(hci_unregister_dev);
4289 
4290 /* Suspend HCI device */
4291 int hci_suspend_dev(struct hci_dev *hdev)
4292 {
4293         hci_notify(hdev, HCI_DEV_SUSPEND);
4294         return 0;
4295 }
4296 EXPORT_SYMBOL(hci_suspend_dev);
4297 
4298 /* Resume HCI device */
4299 int hci_resume_dev(struct hci_dev *hdev)
4300 {
4301         hci_notify(hdev, HCI_DEV_RESUME);
4302         return 0;
4303 }
4304 EXPORT_SYMBOL(hci_resume_dev);
4305 
4306 /* Reset HCI device */
4307 int hci_reset_dev(struct hci_dev *hdev)
4308 {
4309         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4310         struct sk_buff *skb;
4311 
4312         skb = bt_skb_alloc(3, GFP_ATOMIC);
4313         if (!skb)
4314                 return -ENOMEM;
4315 
4316         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4317         memcpy(skb_put(skb, 3), hw_err, 3);
4318 
4319         /* Send Hardware Error to upper stack */
4320         return hci_recv_frame(hdev, skb);
4321 }
4322 EXPORT_SYMBOL(hci_reset_dev);
4323 
4324 /* Receive frame from HCI drivers */
4325 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4326 {
4327         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4328                       && !test_bit(HCI_INIT, &hdev->flags))) {
4329                 kfree_skb(skb);
4330                 return -ENXIO;
4331         }
4332 
4333         /* Incoming skb */
4334         bt_cb(skb)->incoming = 1;
4335 
4336         /* Time stamp */
4337         __net_timestamp(skb);
4338 
4339         skb_queue_tail(&hdev->rx_q, skb);
4340         queue_work(hdev->workqueue, &hdev->rx_work);
4341 
4342         return 0;
4343 }
4344 EXPORT_SYMBOL(hci_recv_frame);
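
/* Editorial example: a driver delivering a completed frame sets the
 * packet type in the skb control block first:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */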
4345 
4346 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4347                           int count, __u8 index)
4348 {
4349         int len = 0;
4350         int hlen = 0;
4351         int remain = count;
4352         struct sk_buff *skb;
4353         struct bt_skb_cb *scb;
4354 
4355         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4356             index >= NUM_REASSEMBLY)
4357                 return -EILSEQ;
4358 
4359         skb = hdev->reassembly[index];
4360 
4361         if (!skb) {
4362                 switch (type) {
4363                 case HCI_ACLDATA_PKT:
4364                         len = HCI_MAX_FRAME_SIZE;
4365                         hlen = HCI_ACL_HDR_SIZE;
4366                         break;
4367                 case HCI_EVENT_PKT:
4368                         len = HCI_MAX_EVENT_SIZE;
4369                         hlen = HCI_EVENT_HDR_SIZE;
4370                         break;
4371                 case HCI_SCODATA_PKT:
4372                         len = HCI_MAX_SCO_SIZE;
4373                         hlen = HCI_SCO_HDR_SIZE;
4374                         break;
4375                 }
4376 
4377                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4378                 if (!skb)
4379                         return -ENOMEM;
4380 
4381                 scb = (void *) skb->cb;
4382                 scb->expect = hlen;
4383                 scb->pkt_type = type;
4384 
4385                 hdev->reassembly[index] = skb;
4386         }
4387 
4388         while (count) {
4389                 scb = (void *) skb->cb;
4390                 len = min_t(uint, scb->expect, count);
4391 
4392                 memcpy(skb_put(skb, len), data, len);
4393 
4394                 count -= len;
4395                 data += len;
4396                 scb->expect -= len;
4397                 remain = count;
4398 
4399                 switch (type) {
4400                 case HCI_EVENT_PKT:
4401                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4402                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4403                                 scb->expect = h->plen;
4404 
4405                                 if (skb_tailroom(skb) < scb->expect) {
4406                                         kfree_skb(skb);
4407                                         hdev->reassembly[index] = NULL;
4408                                         return -ENOMEM;
4409                                 }
4410                         }
4411                         break;
4412 
4413                 case HCI_ACLDATA_PKT:
4414                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4415                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4416                                 scb->expect = __le16_to_cpu(h->dlen);
4417 
4418                                 if (skb_tailroom(skb) < scb->expect) {
4419                                         kfree_skb(skb);
4420                                         hdev->reassembly[index] = NULL;
4421                                         return -ENOMEM;
4422                                 }
4423                         }
4424                         break;
4425 
4426                 case HCI_SCODATA_PKT:
4427                         if (skb->len == HCI_SCO_HDR_SIZE) {
4428                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4429                                 scb->expect = h->dlen;
4430 
4431                                 if (skb_tailroom(skb) < scb->expect) {
4432                                         kfree_skb(skb);
4433                                         hdev->reassembly[index] = NULL;
4434                                         return -ENOMEM;
4435                                 }
4436                         }
4437                         break;
4438                 }
4439 
4440                 if (scb->expect == 0) {
4441                         /* Complete frame */
4442 
4443                         bt_cb(skb)->pkt_type = type;
4444                         hci_recv_frame(hdev, skb);
4445 
4446                         hdev->reassembly[index] = NULL;
4447                         return remain;
4448                 }
4449         }
4450 
4451         return remain;
4452 }
4453 
4454 #define STREAM_REASSEMBLY 0
4455 
4456 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4457 {
4458         int type;
4459         int rem = 0;
4460 
4461         while (count) {
4462                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4463 
4464                 if (!skb) {
4465                         struct { char type; } *pkt;
4466 
4467                         /* Start of the frame */
4468                         pkt = data;
4469                         type = pkt->type;
4470 
4471                         data++;
4472                         count--;
4473                 } else
4474                         type = bt_cb(skb)->pkt_type;
4475 
4476                 rem = hci_reassembly(hdev, type, data, count,
4477                                      STREAM_REASSEMBLY);
4478                 if (rem < 0)
4479                         return rem;
4480 
4481                 data += (count - rem);
4482                 count = rem;
4483         }
4484 
4485         return rem;
4486 }
4487 EXPORT_SYMBOL(hci_recv_stream_fragment);
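
/* Editorial example: a UART-style (H:4) driver with no framing of its
 * own can feed raw bytes straight from its receive path:
 *
 *	hci_recv_stream_fragment(hdev, buf, len);
 *
 * The packet-type byte prefixed to each frame on the wire selects the
 * reassembly rules applied by hci_reassembly() above.
 */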
4488 
4489 /* ---- Interface to upper protocols ---- */
4490 
4491 int hci_register_cb(struct hci_cb *cb)
4492 {
4493         BT_DBG("%p name %s", cb, cb->name);
4494 
4495         write_lock(&hci_cb_list_lock);
4496         list_add(&cb->list, &hci_cb_list);
4497         write_unlock(&hci_cb_list_lock);
4498 
4499         return 0;
4500 }
4501 EXPORT_SYMBOL(hci_register_cb);
4502 
4503 int hci_unregister_cb(struct hci_cb *cb)
4504 {
4505         BT_DBG("%p name %s", cb, cb->name);
4506 
4507         write_lock(&hci_cb_list_lock);
4508         list_del(&cb->list);
4509         write_unlock(&hci_cb_list_lock);
4510 
4511         return 0;
4512 }
4513 EXPORT_SYMBOL(hci_unregister_cb);
4514 
4515 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4516 {
4517         int err;
4518 
4519         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4520 
4521         /* Time stamp */
4522         __net_timestamp(skb);
4523 
4524         /* Send copy to monitor */
4525         hci_send_to_monitor(hdev, skb);
4526 
4527         if (atomic_read(&hdev->promisc)) {
4528                 /* Send copy to the sockets */
4529                 hci_send_to_sock(hdev, skb);
4530         }
4531 
4532         /* Get rid of the skb owner prior to sending to the driver. */
4533         skb_orphan(skb);
4534 
4535         err = hdev->send(hdev, skb);
4536         if (err < 0) {
4537                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4538                 kfree_skb(skb);
4539         }
4540 }
4541 
4542 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4543 {
4544         skb_queue_head_init(&req->cmd_q);
4545         req->hdev = hdev;
4546         req->err = 0;
4547 }
4548 
4549 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4550 {
4551         struct hci_dev *hdev = req->hdev;
4552         struct sk_buff *skb;
4553         unsigned long flags;
4554 
4555         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4556 
4557         /* If an error occurred during request building, remove all HCI
4558          * commands queued on the HCI request queue.
4559          */
4560         if (req->err) {
4561                 skb_queue_purge(&req->cmd_q);
4562                 return req->err;
4563         }
4564 
4565         /* Do not allow empty requests */
4566         if (skb_queue_empty(&req->cmd_q))
4567                 return -ENODATA;
4568 
4569         skb = skb_peek_tail(&req->cmd_q);
4570         bt_cb(skb)->req.complete = complete;
4571 
4572         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4573         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4574         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4575 
4576         queue_work(hdev->workqueue, &hdev->cmd_work);
4577 
4578         return 0;
4579 }
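
/* Editorial example: the asynchronous request pattern used throughout
 * this file (compare le_scan_disable_work() above); my_complete is a
 * hypothetical hci_req_complete_t callback:
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_ENABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */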
4580 
4581 bool hci_req_pending(struct hci_dev *hdev)
4582 {
4583         return (hdev->req_status == HCI_REQ_PEND);
4584 }
4585 
4586 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4587                                        u32 plen, const void *param)
4588 {
4589         int len = HCI_COMMAND_HDR_SIZE + plen;
4590         struct hci_command_hdr *hdr;
4591         struct sk_buff *skb;
4592 
4593         skb = bt_skb_alloc(len, GFP_ATOMIC);
4594         if (!skb)
4595                 return NULL;
4596 
4597         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4598         hdr->opcode = cpu_to_le16(opcode);
4599         hdr->plen   = plen;
4600 
4601         if (plen)
4602                 memcpy(skb_put(skb, plen), param, plen);
4603 
4604         BT_DBG("skb len %d", skb->len);
4605 
4606         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4607         bt_cb(skb)->opcode = opcode;
4608 
4609         return skb;
4610 }
4611 
4612 /* Send HCI command */
4613 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4614                  const void *param)
4615 {
4616         struct sk_buff *skb;
4617 
4618         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4619 
4620         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4621         if (!skb) {
4622                 BT_ERR("%s no memory for command", hdev->name);
4623                 return -ENOMEM;
4624         }
4625 
4626         /* Stand-alone HCI commands must be flagged as
4627          * single-command requests.
4628          */
4629         bt_cb(skb)->req.start = true;
4630 
4631         skb_queue_tail(&hdev->cmd_q, skb);
4632         queue_work(hdev->workqueue, &hdev->cmd_work);
4633 
4634         return 0;
4635 }
4636 
4637 /* Queue a command to an asynchronous HCI request */
4638 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4639                     const void *param, u8 event)
4640 {
4641         struct hci_dev *hdev = req->hdev;
4642         struct sk_buff *skb;
4643 
4644         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4645 
4646         /* If an error occurred during request building, there is no point in
4647          * queueing the HCI command. We can simply return.
4648          */
4649         if (req->err)
4650                 return;
4651 
4652         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4653         if (!skb) {
4654                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4655                        hdev->name, opcode);
4656                 req->err = -ENOMEM;
4657                 return;
4658         }
4659 
4660         if (skb_queue_empty(&req->cmd_q))
4661                 bt_cb(skb)->req.start = true;
4662 
4663         bt_cb(skb)->req.event = event;
4664 
4665         skb_queue_tail(&req->cmd_q, skb);
4666 }
4667 
4668 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4669                  const void *param)
4670 {
4671         hci_req_add_ev(req, opcode, plen, param, 0);
4672 }
4673 
4674 /* Get data from the previously sent command */
4675 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4676 {
4677         struct hci_command_hdr *hdr;
4678 
4679         if (!hdev->sent_cmd)
4680                 return NULL;
4681 
4682         hdr = (void *) hdev->sent_cmd->data;
4683 
4684         if (hdr->opcode != cpu_to_le16(opcode))
4685                 return NULL;
4686 
4687         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4688 
4689         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4690 }
4691 
4692 /* Send ACL data */
4693 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4694 {
4695         struct hci_acl_hdr *hdr;
4696         int len = skb->len;
4697 
4698         skb_push(skb, HCI_ACL_HDR_SIZE);
4699         skb_reset_transport_header(skb);
4700         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4701         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4702         hdr->dlen   = cpu_to_le16(len);
4703 }
4704 
4705 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4706                           struct sk_buff *skb, __u16 flags)
4707 {
4708         struct hci_conn *conn = chan->conn;
4709         struct hci_dev *hdev = conn->hdev;
4710         struct sk_buff *list;
4711 
4712         skb->len = skb_headlen(skb);
4713         skb->data_len = 0;
4714 
4715         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4716 
4717         switch (hdev->dev_type) {
4718         case HCI_BREDR:
4719                 hci_add_acl_hdr(skb, conn->handle, flags);
4720                 break;
4721         case HCI_AMP:
4722                 hci_add_acl_hdr(skb, chan->handle, flags);
4723                 break;
4724         default:
4725                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4726                 return;
4727         }
4728 
4729         list = skb_shinfo(skb)->frag_list;
4730         if (!list) {
4731                 /* Non fragmented */
4732                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4733 
4734                 skb_queue_tail(queue, skb);
4735         } else {
4736                 /* Fragmented */
4737                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4738 
4739                 skb_shinfo(skb)->frag_list = NULL;
4740 
4741                 /* Queue all fragments atomically. We need to use spin_lock_bh
4742                  * here because with 6LoWPAN links this function can be called
4743                  * from softirq context, and taking a normal spin lock there
4744                  * could cause deadlocks.
4745                  */
4746                 spin_lock_bh(&queue->lock);
4747 
4748                 __skb_queue_tail(queue, skb);
4749 
4750                 flags &= ~ACL_START;
4751                 flags |= ACL_CONT;
4752                 do {
4753                         skb = list; list = list->next;
4754 
4755                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4756                         hci_add_acl_hdr(skb, conn->handle, flags);
4757 
4758                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4759 
4760                         __skb_queue_tail(queue, skb);
4761                 } while (list);
4762 
4763                 spin_unlock_bh(&queue->lock);
4764         }
4765 }
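
/* Editor's note: in the fragmented branch above, only the head skb
 * keeps the flags passed in by the caller (typically ACL_START); every
 * skb on its frag_list is re-tagged ACL_CONT before being queued, so
 * the controller sees one start fragment followed by continuations.
 */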
4766 
4767 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4768 {
4769         struct hci_dev *hdev = chan->conn->hdev;
4770 
4771         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4772 
4773         hci_queue_acl(chan, &chan->data_q, skb, flags);
4774 
4775         queue_work(hdev->workqueue, &hdev->tx_work);
4776 }
4777 
4778 /* Send SCO data */
4779 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4780 {
4781         struct hci_dev *hdev = conn->hdev;
4782         struct hci_sco_hdr hdr;
4783 
4784         BT_DBG("%s len %d", hdev->name, skb->len);
4785 
4786         hdr.handle = cpu_to_le16(conn->handle);
4787         hdr.dlen   = skb->len;
4788 
4789         skb_push(skb, HCI_SCO_HDR_SIZE);
4790         skb_reset_transport_header(skb);
4791         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4792 
4793         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4794 
4795         skb_queue_tail(&conn->data_q, skb);
4796         queue_work(hdev->workqueue, &hdev->tx_work);
4797 }
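
/* Editor's note: unlike the ACL header, struct hci_sco_hdr carries its
 * length in a single byte (dlen is a __u8), which is why no
 * cpu_to_le16() conversion is applied to it and why SCO payloads are
 * limited to 255 bytes.
 */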
4798 
4799 /* ---- HCI TX task (outgoing data) ---- */
4800 
4801 /* HCI Connection scheduler */
4802 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4803                                      int *quote)
4804 {
4805         struct hci_conn_hash *h = &hdev->conn_hash;
4806         struct hci_conn *conn = NULL, *c;
4807         unsigned int num = 0, min = ~0;
4808 
4809         /* We don't have to lock the device here. Connections are
4810          * always added and removed with the TX task disabled. */
4811 
4812         rcu_read_lock();
4813 
4814         list_for_each_entry_rcu(c, &h->list, list) {
4815                 if (c->type != type || skb_queue_empty(&c->data_q))
4816                         continue;
4817 
4818                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4819                         continue;
4820 
4821                 num++;
4822 
4823                 if (c->sent < min) {
4824                         min  = c->sent;
4825                         conn = c;
4826                 }
4827 
4828                 if (hci_conn_num(hdev, type) == num)
4829                         break;
4830         }
4831 
4832         rcu_read_unlock();
4833 
4834         if (conn) {
4835                 int cnt, q;
4836 
4837                 switch (conn->type) {
4838                 case ACL_LINK:
4839                         cnt = hdev->acl_cnt;
4840                         break;
4841                 case SCO_LINK:
4842                 case ESCO_LINK:
4843                         cnt = hdev->sco_cnt;
4844                         break;
4845                 case LE_LINK:
4846                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4847                         break;
4848                 default:
4849                         cnt = 0;
4850                         BT_ERR("Unknown link type");
4851                 }
4852 
4853                 q = cnt / num;
4854                 *quote = q ? q : 1;
4855         } else
4856                 *quote = 0;
4857 
4858         BT_DBG("conn %p quote %d", conn, *quote);
4859         return conn;
4860 }
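
/* Editor's note: a worked example of the quota arithmetic above, with
 * illustrative numbers. With hdev->sco_cnt == 8 free buffers and
 * num == 3 SCO connections holding queued data, the chosen connection
 * gets q = 8 / 3 = 2 packets for this scheduling round; the
 * "q ? q : 1" fallback guarantees at least one packet even when
 * cnt < num. The connection picked is the one with the lowest c->sent,
 * i.e. the one that has transmitted the least so far.
 */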
4861 
4862 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4863 {
4864         struct hci_conn_hash *h = &hdev->conn_hash;
4865         struct hci_conn *c;
4866 
4867         BT_ERR("%s link tx timeout", hdev->name);
4868 
4869         rcu_read_lock();
4870 
4871         /* Kill stalled connections */
4872         list_for_each_entry_rcu(c, &h->list, list) {
4873                 if (c->type == type && c->sent) {
4874                         BT_ERR("%s killing stalled connection %pMR",
4875                                hdev->name, &c->dst);
4876                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4877                 }
4878         }
4879 
4880         rcu_read_unlock();
4881 }
4882 
4883 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4884                                       int *quote)
4885 {
4886         struct hci_conn_hash *h = &hdev->conn_hash;
4887         struct hci_chan *chan = NULL;
4888         unsigned int num = 0, min = ~0, cur_prio = 0;
4889         struct hci_conn *conn;
4890         int cnt, q, conn_num = 0;
4891 
4892         BT_DBG("%s", hdev->name);
4893 
4894         rcu_read_lock();
4895 
4896         list_for_each_entry_rcu(conn, &h->list, list) {
4897                 struct hci_chan *tmp;
4898 
4899                 if (conn->type != type)
4900                         continue;
4901 
4902                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4903                         continue;
4904 
4905                 conn_num++;
4906 
4907                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4908                         struct sk_buff *skb;
4909 
4910                         if (skb_queue_empty(&tmp->data_q))
4911                                 continue;
4912 
4913                         skb = skb_peek(&tmp->data_q);
4914                         if (skb->priority < cur_prio)
4915                                 continue;
4916 
4917                         if (skb->priority > cur_prio) {
4918                                 num = 0;
4919                                 min = ~0;
4920                                 cur_prio = skb->priority;
4921                         }
4922 
4923                         num++;
4924 
4925                         if (conn->sent < min) {
4926                                 min  = conn->sent;
4927                                 chan = tmp;
4928                         }
4929                 }
4930 
4931                 if (hci_conn_num(hdev, type) == conn_num)
4932                         break;
4933         }
4934 
4935         rcu_read_unlock();
4936 
4937         if (!chan)
4938                 return NULL;
4939 
4940         switch (chan->conn->type) {
4941         case ACL_LINK:
4942                 cnt = hdev->acl_cnt;
4943                 break;
4944         case AMP_LINK:
4945                 cnt = hdev->block_cnt;
4946                 break;
4947         case SCO_LINK:
4948         case ESCO_LINK:
4949                 cnt = hdev->sco_cnt;
4950                 break;
4951         case LE_LINK:
4952                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4953                 break;
4954         default:
4955                 cnt = 0;
4956                 BT_ERR("Unknown link type");
4957         }
4958 
4959         q = cnt / num;
4960         *quote = q ? q : 1;
4961         BT_DBG("chan %p quote %d", chan, *quote);
4962         return chan;
4963 }
4964 
4965 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4966 {
4967         struct hci_conn_hash *h = &hdev->conn_hash;
4968         struct hci_conn *conn;
4969         int num = 0;
4970 
4971         BT_DBG("%s", hdev->name);
4972 
4973         rcu_read_lock();
4974 
4975         list_for_each_entry_rcu(conn, &h->list, list) {
4976                 struct hci_chan *chan;
4977 
4978                 if (conn->type != type)
4979                         continue;
4980 
4981                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4982                         continue;
4983 
4984                 num++;
4985 
4986                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4987                         struct sk_buff *skb;
4988 
4989                         if (chan->sent) {
4990                                 chan->sent = 0;
4991                                 continue;
4992                         }
4993 
4994                         if (skb_queue_empty(&chan->data_q))
4995                                 continue;
4996 
4997                         skb = skb_peek(&chan->data_q);
4998                         if (skb->priority >= HCI_PRIO_MAX - 1)
4999                                 continue;
5000 
5001                         skb->priority = HCI_PRIO_MAX - 1;
5002 
5003                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
5004                                skb->priority);
5005                 }
5006 
5007                 if (hci_conn_num(hdev, type) == num)
5008                         break;
5009         }
5010 
5011         rcu_read_unlock();
5012 
5013 }
5014 
5015 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
5016 {
5017         /* Calculate count of blocks used by this packet */
5018         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
5019 }
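
/* Editor's note: a worked example with illustrative numbers. For a
 * 1500-byte skb (which includes the 4-byte ACL header,
 * HCI_ACL_HDR_SIZE) on a controller with hdev->block_len == 256, this
 * evaluates to DIV_ROUND_UP(1496, 256) == 6 data blocks.
 */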
5020 
5021 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
5022 {
5023         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5024                 /* ACL tx timeout must be longer than maximum
5025                  * link supervision timeout (40.9 seconds) */
5026                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5027                                        HCI_ACL_TX_TIMEOUT))
5028                         hci_link_tx_to(hdev, ACL_LINK);
5029         }
5030 }
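
/* Editor's note: HCI_ACL_TX_TIMEOUT is defined in hci.h as
 * msecs_to_jiffies(45000), i.e. 45 seconds, safely above the 40.9
 * second maximum supervision timeout mentioned above. The LE scheduler
 * below open-codes the same bound as HZ * 45.
 */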
5031 
5032 static void hci_sched_acl_pkt(struct hci_dev *hdev)
5033 {
5034         unsigned int cnt = hdev->acl_cnt;
5035         struct hci_chan *chan;
5036         struct sk_buff *skb;
5037         int quote;
5038 
5039         __check_timeout(hdev, cnt);
5040 
5041         while (hdev->acl_cnt &&
5042                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
5043                 u32 priority = (skb_peek(&chan->data_q))->priority;
5044                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5045                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5046                                skb->len, skb->priority);
5047 
5048                         /* Stop if priority has changed */
5049                         if (skb->priority < priority)
5050                                 break;
5051 
5052                         skb = skb_dequeue(&chan->data_q);
5053 
5054                         hci_conn_enter_active_mode(chan->conn,
5055                                                    bt_cb(skb)->force_active);
5056 
5057                         hci_send_frame(hdev, skb);
5058                         hdev->acl_last_tx = jiffies;
5059 
5060                         hdev->acl_cnt--;
5061                         chan->sent++;
5062                         chan->conn->sent++;
5063                 }
5064         }
5065 
5066         if (cnt != hdev->acl_cnt)
5067                 hci_prio_recalculate(hdev, ACL_LINK);
5068 }
5069 
5070 static void hci_sched_acl_blk(struct hci_dev *hdev)
5071 {
5072         unsigned int cnt = hdev->block_cnt;
5073         struct hci_chan *chan;
5074         struct sk_buff *skb;
5075         int quote;
5076         u8 type;
5077 
5078         __check_timeout(hdev, cnt);
5079 
5080         BT_DBG("%s", hdev->name);
5081 
5082         if (hdev->dev_type == HCI_AMP)
5083                 type = AMP_LINK;
5084         else
5085                 type = ACL_LINK;
5086 
5087         while (hdev->block_cnt > 0 &&
5088                (chan = hci_chan_sent(hdev, type, &quote))) {
5089                 u32 priority = (skb_peek(&chan->data_q))->priority;
5090                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5091                         int blocks;
5092 
5093                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5094                                skb->len, skb->priority);
5095 
5096                         /* Stop if priority has changed */
5097                         if (skb->priority < priority)
5098                                 break;
5099 
5100                         skb = skb_dequeue(&chan->data_q);
5101 
5102                         blocks = __get_blocks(hdev, skb);
5103                         if (blocks > hdev->block_cnt)
5104                                 return;
5105 
5106                         hci_conn_enter_active_mode(chan->conn,
5107                                                    bt_cb(skb)->force_active);
5108 
5109                         hci_send_frame(hdev, skb);
5110                         hdev->acl_last_tx = jiffies;
5111 
5112                         hdev->block_cnt -= blocks;
5113                         quote -= blocks;
5114 
5115                         chan->sent += blocks;
5116                         chan->conn->sent += blocks;
5117                 }
5118         }
5119 
5120         if (cnt != hdev->block_cnt)
5121                 hci_prio_recalculate(hdev, type);
5122 }
5123 
5124 static void hci_sched_acl(struct hci_dev *hdev)
5125 {
5126         BT_DBG("%s", hdev->name);
5127 
5128         /* No ACL connections on a BR/EDR controller, nothing to schedule */
5129         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5130                 return;
5131 
5132         /* No AMP connections on an AMP controller, nothing to schedule */
5133         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5134                 return;
5135 
5136         switch (hdev->flow_ctl_mode) {
5137         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5138                 hci_sched_acl_pkt(hdev);
5139                 break;
5140 
5141         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5142                 hci_sched_acl_blk(hdev);
5143                 break;
5144         }
5145 }
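
/* Editor's note: the two branches above correspond to the controller's
 * flow control mode. In packet-based mode the budget (hdev->acl_cnt)
 * counts whole ACL packets; in block-based mode the budget
 * (hdev->block_cnt) counts fixed-size data blocks as reported by the
 * controller, with __get_blocks() converting each skb into its block
 * cost.
 */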
5146 
5147 /* Schedule SCO */
5148 static void hci_sched_sco(struct hci_dev *hdev)
5149 {
5150         struct hci_conn *conn;
5151         struct sk_buff *skb;
5152         int quote;
5153 
5154         BT_DBG("%s", hdev->name);
5155 
5156         if (!hci_conn_num(hdev, SCO_LINK))
5157                 return;
5158 
5159         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5160                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5161                         BT_DBG("skb %p len %d", skb, skb->len);
5162                         hci_send_frame(hdev, skb);
5163 
5164                         conn->sent++;
5165                         if (conn->sent == ~0)
5166                                 conn->sent = 0;
5167                 }
5168         }
5169 }
5170 
5171 static void hci_sched_esco(struct hci_dev *hdev)
5172 {
5173         struct hci_conn *conn;
5174         struct sk_buff *skb;
5175         int quote;
5176 
5177         BT_DBG("%s", hdev->name);
5178 
5179         if (!hci_conn_num(hdev, ESCO_LINK))
5180                 return;
5181 
5182         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5183                                                      &quote))) {
5184                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5185                         BT_DBG("skb %p len %d", skb, skb->len);
5186                         hci_send_frame(hdev, skb);
5187 
5188                         conn->sent++;
5189                         if (conn->sent == ~0)
5190                                 conn->sent = 0;
5191                 }
5192         }
5193 }
5194 
5195 static void hci_sched_le(struct hci_dev *hdev)
5196 {
5197         struct hci_chan *chan;
5198         struct sk_buff *skb;
5199         int quote, cnt, tmp;
5200 
5201         BT_DBG("%s", hdev->name);
5202 
5203         if (!hci_conn_num(hdev, LE_LINK))
5204                 return;
5205 
5206         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5207                 /* LE tx timeout must be longer than maximum
5208                  * link supervision timeout (40.9 seconds) */
5209                 if (!hdev->le_cnt && hdev->le_pkts &&
5210                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5211                         hci_link_tx_to(hdev, LE_LINK);
5212         }
5213 
5214         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5215         tmp = cnt;
5216         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5217                 u32 priority = (skb_peek(&chan->data_q))->priority;
5218                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5219                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5220                                skb->len, skb->priority);
5221 
5222                         /* Stop if priority has changed */
5223                         if (skb->priority < priority)
5224                                 break;
5225 
5226                         skb = skb_dequeue(&chan->data_q);
5227 
5228                         hci_send_frame(hdev, skb);
5229                         hdev->le_last_tx = jiffies;
5230 
5231                         cnt--;
5232                         chan->sent++;
5233                         chan->conn->sent++;
5234                 }
5235         }
5236 
5237         if (hdev->le_pkts)
5238                 hdev->le_cnt = cnt;
5239         else
5240                 hdev->acl_cnt = cnt;
5241 
5242         if (cnt != tmp)
5243                 hci_prio_recalculate(hdev, LE_LINK);
5244 }
5245 
5246 static void hci_tx_work(struct work_struct *work)
5247 {
5248         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5249         struct sk_buff *skb;
5250 
5251         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5252                hdev->sco_cnt, hdev->le_cnt);
5253 
5254         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5255                 /* Schedule queues and send stuff to HCI driver */
5256                 hci_sched_acl(hdev);
5257                 hci_sched_sco(hdev);
5258                 hci_sched_esco(hdev);
5259                 hci_sched_le(hdev);
5260         }
5261 
5262         /* Send next queued raw (unknown type) packet */
5263         while ((skb = skb_dequeue(&hdev->raw_q)))
5264                 hci_send_frame(hdev, skb);
5265 }
5266 
5267 /* ----- HCI RX task (incoming data processing) ----- */
5268 
5269 /* ACL data packet */
5270 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5271 {
5272         struct hci_acl_hdr *hdr = (void *) skb->data;
5273         struct hci_conn *conn;
5274         __u16 handle, flags;
5275 
5276         skb_pull(skb, HCI_ACL_HDR_SIZE);
5277 
5278         handle = __le16_to_cpu(hdr->handle);
5279         flags  = hci_flags(handle);
5280         handle = hci_handle(handle);
5281 
5282         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5283                handle, flags);
5284 
5285         hdev->stat.acl_rx++;
5286 
5287         hci_dev_lock(hdev);
5288         conn = hci_conn_hash_lookup_handle(hdev, handle);
5289         hci_dev_unlock(hdev);
5290 
5291         if (conn) {
5292                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5293 
5294                 /* Send to upper protocol */
5295                 l2cap_recv_acldata(conn, skb, flags);
5296                 return;
5297         } else {
5298                 BT_ERR("%s ACL packet for unknown connection handle %d",
5299                        hdev->name, handle);
5300         }
5301 
5302         kfree_skb(skb);
5303 }
5304 
5305 /* SCO data packet */
5306 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5307 {
5308         struct hci_sco_hdr *hdr = (void *) skb->data;
5309         struct hci_conn *conn;
5310         __u16 handle;
5311 
5312         skb_pull(skb, HCI_SCO_HDR_SIZE);
5313 
5314         handle = __le16_to_cpu(hdr->handle);
5315 
5316         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5317 
5318         hdev->stat.sco_rx++;
5319 
5320         hci_dev_lock(hdev);
5321         conn = hci_conn_hash_lookup_handle(hdev, handle);
5322         hci_dev_unlock(hdev);
5323 
5324         if (conn) {
5325                 /* Send to upper protocol */
5326                 sco_recv_scodata(conn, skb);
5327                 return;
5328         } else {
5329                 BT_ERR("%s SCO packet for unknown connection handle %d",
5330                        hdev->name, handle);
5331         }
5332 
5333         kfree_skb(skb);
5334 }
5335 
5336 static bool hci_req_is_complete(struct hci_dev *hdev)
5337 {
5338         struct sk_buff *skb;
5339 
5340         skb = skb_peek(&hdev->cmd_q);
5341         if (!skb)
5342                 return true;
5343 
5344         return bt_cb(skb)->req.start;
5345 }
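
/* Editor's note: a sketch of the cmd_q layout this check relies on.
 * hci_req_add_ev() above tags only the first command of each request
 * with req.start, so the queue looks like:
 *
 *	head -> [A1 start][A2][A3][B1 start][B2] ...
 *
 * Once A3 completes, the head of cmd_q is B1 with req.start set, which
 * is how hci_req_is_complete() detects that request A has finished. An
 * empty queue also counts as complete.
 */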
5346 
5347 static void hci_resend_last(struct hci_dev *hdev)
5348 {
5349         struct hci_command_hdr *sent;
5350         struct sk_buff *skb;
5351         u16 opcode;
5352 
5353         if (!hdev->sent_cmd)
5354                 return;
5355 
5356         sent = (void *) hdev->sent_cmd->data;
5357         opcode = __le16_to_cpu(sent->opcode);
5358         if (opcode == HCI_OP_RESET)
5359                 return;
5360 
5361         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5362         if (!skb)
5363                 return;
5364 
5365         skb_queue_head(&hdev->cmd_q, skb);
5366         queue_work(hdev->workqueue, &hdev->cmd_work);
5367 }
5368 
5369 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5370 {
5371         hci_req_complete_t req_complete = NULL;
5372         struct sk_buff *skb;
5373         unsigned long flags;
5374 
5375         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5376 
5377         /* If the completed command doesn't match the last one that was
5378          * sent we need to do special handling of it.
5379          */
5380         if (!hci_sent_cmd_data(hdev, opcode)) {
5381                 /* Some CSR based controllers generate a spontaneous
5382                  * reset complete event during init and any pending
5383                  * command will never be completed. In such a case we
5384                  * need to resend whatever was the last sent
5385                  * command.
5386                  */
5387                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5388                         hci_resend_last(hdev);
5389 
5390                 return;
5391         }
5392 
5393         /* If the command succeeded and there are still more commands in
5394          * this request, the request is not yet complete.
5395          */
5396         if (!status && !hci_req_is_complete(hdev))
5397                 return;
5398 
5399         /* If this was the last command in a request the complete
5400          * callback would be found in hdev->sent_cmd instead of the
5401          * command queue (hdev->cmd_q).
5402          */
5403         if (hdev->sent_cmd) {
5404                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5405 
5406                 if (req_complete) {
5407                         /* We must set the complete callback to NULL to
5408                          * avoid calling the callback more than once if
5409                          * this function gets called again.
5410                          */
5411                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5412 
5413                         goto call_complete;
5414                 }
5415         }
5416 
5417         /* Remove all pending commands belonging to this request */
5418         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5419         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5420                 if (bt_cb(skb)->req.start) {
5421                         __skb_queue_head(&hdev->cmd_q, skb);
5422                         break;
5423                 }
5424 
5425                 req_complete = bt_cb(skb)->req.complete;
5426                 kfree_skb(skb);
5427         }
5428         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5429 
5430 call_complete:
5431         if (req_complete)
5432                 req_complete(hdev, status);
5433 }
5434 
5435 static void hci_rx_work(struct work_struct *work)
5436 {
5437         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5438         struct sk_buff *skb;
5439 
5440         BT_DBG("%s", hdev->name);
5441 
5442         while ((skb = skb_dequeue(&hdev->rx_q))) {
5443                 /* Send copy to monitor */
5444                 hci_send_to_monitor(hdev, skb);
5445 
5446                 if (atomic_read(&hdev->promisc)) {
5447                         /* Send copy to the sockets */
5448                         hci_send_to_sock(hdev, skb);
5449                 }
5450 
5451                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5452                         kfree_skb(skb);
5453                         continue;
5454                 }
5455 
5456                 if (test_bit(HCI_INIT, &hdev->flags)) {
5457                         /* Don't process data packets in this state. */
5458                         switch (bt_cb(skb)->pkt_type) {
5459                         case HCI_ACLDATA_PKT:
5460                         case HCI_SCODATA_PKT:
5461                                 kfree_skb(skb);
5462                                 continue;
5463                         }
5464                 }
5465 
5466                 /* Process frame */
5467                 switch (bt_cb(skb)->pkt_type) {
5468                 case HCI_EVENT_PKT:
5469                         BT_DBG("%s Event packet", hdev->name);
5470                         hci_event_packet(hdev, skb);
5471                         break;
5472 
5473                 case HCI_ACLDATA_PKT:
5474                         BT_DBG("%s ACL data packet", hdev->name);
5475                         hci_acldata_packet(hdev, skb);
5476                         break;
5477 
5478                 case HCI_SCODATA_PKT:
5479                         BT_DBG("%s SCO data packet", hdev->name);
5480                         hci_scodata_packet(hdev, skb);
5481                         break;
5482 
5483                 default:
5484                         kfree_skb(skb);
5485                         break;
5486                 }
5487         }
5488 }
5489 
5490 static void hci_cmd_work(struct work_struct *work)
5491 {
5492         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5493         struct sk_buff *skb;
5494 
5495         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5496                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5497 
5498         /* Send queued commands */
5499         if (atomic_read(&hdev->cmd_cnt)) {
5500                 skb = skb_dequeue(&hdev->cmd_q);
5501                 if (!skb)
5502                         return;
5503 
5504                 kfree_skb(hdev->sent_cmd);
5505 
5506                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5507                 if (hdev->sent_cmd) {
5508                         atomic_dec(&hdev->cmd_cnt);
5509                         hci_send_frame(hdev, skb);
5510                         if (test_bit(HCI_RESET, &hdev->flags))
5511                                 cancel_delayed_work(&hdev->cmd_timer);
5512                         else
5513                                 schedule_delayed_work(&hdev->cmd_timer,
5514                                                       HCI_CMD_TIMEOUT);
5515                 } else {
5516                         skb_queue_head(&hdev->cmd_q, skb);
5517                         queue_work(hdev->workqueue, &hdev->cmd_work);
5518                 }
5519         }
5520 }
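
/* Editor's note: hdev->cmd_cnt mirrors the Num_HCI_Command_Packets
 * credit reported by the controller in Command Complete/Status events:
 * it is consumed here on each send and replenished by the event
 * handlers. The clone kept in hdev->sent_cmd is what
 * hci_sent_cmd_data() and hci_resend_last() above operate on, and
 * cmd_timer catches controllers that never answer a command.
 */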
5521 
5522 void hci_req_add_le_scan_disable(struct hci_request *req)
5523 {
5524         struct hci_cp_le_set_scan_enable cp;
5525 
5526         memset(&cp, 0, sizeof(cp));
5527         cp.enable = LE_SCAN_DISABLE;
5528         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5529 }
5530 
5531 static void add_to_white_list(struct hci_request *req,
5532                               struct hci_conn_params *params)
5533 {
5534         struct hci_cp_le_add_to_white_list cp;
5535 
5536         cp.bdaddr_type = params->addr_type;
5537         bacpy(&cp.bdaddr, &params->addr);
5538 
5539         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5540 }
5541 
5542 static u8 update_white_list(struct hci_request *req)
5543 {
5544         struct hci_dev *hdev = req->hdev;
5545         struct hci_conn_params *params;
5546         struct bdaddr_list *b;
5547         uint8_t white_list_entries = 0;
5548 
5549         /* Go through the current white list programmed into the
5550          * controller one by one and check if that address is still
5551          * in the list of pending connections or list of devices to
5552          * report. If not present in either list, then queue the
5553          * command to remove it from the controller.
5554          */
5555         list_for_each_entry(b, &hdev->le_white_list, list) {
5556                 struct hci_cp_le_del_from_white_list cp;
5557 
5558                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5559                                               &b->bdaddr, b->bdaddr_type) ||
5560                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5561                                               &b->bdaddr, b->bdaddr_type)) {
5562                         white_list_entries++;
5563                         continue;
5564                 }
5565 
5566                 cp.bdaddr_type = b->bdaddr_type;
5567                 bacpy(&cp.bdaddr, &b->bdaddr);
5568 
5569                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5570                             sizeof(cp), &cp);
5571         }
5572 
5573         /* Since all no-longer-valid white list entries have been
5574          * removed, walk through the list of pending connections
5575          * and ensure that any new device gets programmed into
5576          * the controller.
5577          *
5578          * If the list of devices is larger than the number of
5579          * available white list entries in the controller, then
5580          * just abort and return a filter policy value that does
5581          * not use the white list.
5582          */
5583         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5584                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5585                                            &params->addr, params->addr_type))
5586                         continue;
5587 
5588                 if (white_list_entries >= hdev->le_white_list_size) {
5589                         /* Select filter policy to accept all advertising */
5590                         return 0x00;
5591                 }
5592 
5593                 if (hci_find_irk_by_addr(hdev, &params->addr,
5594                                          params->addr_type)) {
5595                         /* White list can not be used with RPAs */
5596                         return 0x00;
5597                 }
5598 
5599                 white_list_entries++;
5600                 add_to_white_list(req, params);
5601         }
5602 
5603         /* After adding all new pending connections, walk through
5604          * the list of pending reports and also add these to the
5605          * white list if there is still space.
5606          */
5607         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5608                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5609                                            &params->addr, params->addr_type))
5610                         continue;
5611 
5612                 if (white_list_entries >= hdev->le_white_list_size) {
5613                         /* Select filter policy to accept all advertising */
5614                         return 0x00;
5615                 }
5616 
5617                 if (hci_find_irk_by_addr(hdev, &params->addr,
5618                                          params->addr_type)) {
5619                         /* White list can not be used with RPAs */
5620                         return 0x00;
5621                 }
5622 
5623                 white_list_entries++;
5624                 add_to_white_list(req, params);
5625         }
5626 
5627         /* Select filter policy to use white list */
5628         return 0x01;
5629 }
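
/* Editor's note: a summary of the return values used above. 0x00
 * selects the "accept all advertising" filter policy and is returned
 * whenever the white list cannot faithfully represent the pending set
 * (too many devices, or a peer known by a resolvable private address);
 * 0x01 selects "white list only". hci_req_add_le_passive_scan() below
 * may additionally OR in 0x02 when Extended Scanner Filter Policies
 * are supported.
 */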
5630 
5631 void hci_req_add_le_passive_scan(struct hci_request *req)
5632 {
5633         struct hci_cp_le_set_scan_param param_cp;
5634         struct hci_cp_le_set_scan_enable enable_cp;
5635         struct hci_dev *hdev = req->hdev;
5636         u8 own_addr_type;
5637         u8 filter_policy;
5638 
5639         /* Set require_privacy to false since no SCAN_REQ is sent
5640          * during passive scanning. Not using a non-resolvable address
5641          * here is important so that peer devices using directed
5642          * advertising with our address will be correctly reported
5643          * by the controller.
5644          */
5645         if (hci_update_random_address(req, false, &own_addr_type))
5646                 return;
5647 
5648         /* Adding or removing entries from the white list must
5649          * happen before enabling scanning. The controller does
5650          * not allow white list modification while scanning.
5651          */
5652         filter_policy = update_white_list(req);
5653 
5654         /* When the controller is using random resolvable addresses,
5655          * i.e. has LE privacy enabled, controllers that support
5656          * Extended Scanner Filter Policies can additionally handle
5657          * directed advertising.
5658          *
5659          * So instead of using filter policies 0x00 (no whitelist)
5660          * and 0x01 (whitelist enabled), use the new filter policies
5661          * 0x02 (no whitelist) and 0x03 (whitelist enabled).
5662          */
5663         if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
5664             (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
5665                 filter_policy |= 0x02;
5666 
5667         memset(&param_cp, 0, sizeof(param_cp));
5668         param_cp.type = LE_SCAN_PASSIVE;
5669         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5670         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5671         param_cp.own_address_type = own_addr_type;
5672         param_cp.filter_policy = filter_policy;
5673         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5674                     &param_cp);
5675 
5676         memset(&enable_cp, 0, sizeof(enable_cp));
5677         enable_cp.enable = LE_SCAN_ENABLE;
5678         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5679         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5680                     &enable_cp);
5681 }
5682 
5683 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5684 {
5685         if (status)
5686                 BT_DBG("HCI request failed to update background scanning: "
5687                        "status 0x%2.2x", status);
5688 }
5689 
5690 /* This function controls background scanning based on the
5691  * hdev->pend_le_conns list. If there are pending LE connections we start
5692  * background scanning, otherwise we stop it.
5693  *
5694  * This function requires that the caller holds hdev->lock.
5695  */
5696 void hci_update_background_scan(struct hci_dev *hdev)
5697 {
5698         struct hci_request req;
5699         struct hci_conn *conn;
5700         int err;
5701 
5702         if (!test_bit(HCI_UP, &hdev->flags) ||
5703             test_bit(HCI_INIT, &hdev->flags) ||
5704             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5705             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5706             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5707             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5708                 return;
5709 
5710         /* No point in doing scanning if LE support hasn't been enabled */
5711         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5712                 return;
5713 
5714         /* If discovery is active don't interfere with it */
5715         if (hdev->discovery.state != DISCOVERY_STOPPED)
5716                 return;
5717 
5718         /* Reset RSSI and UUID filters when starting background scanning
5719          * since these filters are meant for service discovery only.
5720          *
5721          * The Start Discovery and Start Service Discovery operations
5722          * make sure to set proper values for the RSSI threshold and
5723          * UUID filter list, so it is safe to just reset them here.
5724          */
5725         hci_discovery_filter_clear(hdev);
5726 
5727         hci_req_init(&req, hdev);
5728 
5729         if (list_empty(&hdev->pend_le_conns) &&
5730             list_empty(&hdev->pend_le_reports)) {
5731                 /* If there are no pending LE connections or devices
5732                  * to be scanned for, we should stop background
5733                  * scanning.
5734                  */
5735 
5736                 /* If controller is not scanning we are done. */
5737                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5738                         return;
5739 
5740                 hci_req_add_le_scan_disable(&req);
5741 
5742                 BT_DBG("%s stopping background scanning", hdev->name);
5743         } else {
5744                 /* If there is at least one pending LE connection, we should
5745                  * keep the background scan running.
5746                  */
5747 
5748                 /* If controller is connecting, we should not start scanning
5749                  * since some controllers are not able to scan and connect at
5750                  * the same time.
5751                  */
5752                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5753                 if (conn)
5754                         return;
5755 
5756                 /* If controller is currently scanning, we stop it to ensure we
5757                  * don't miss any advertising (due to the duplicates filter).
5758                  */
5759                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5760                         hci_req_add_le_scan_disable(&req);
5761 
5762                 hci_req_add_le_passive_scan(&req);
5763 
5764                 BT_DBG("%s starting background scanning", hdev->name);
5765         }
5766 
5767         err = hci_req_run(&req, update_background_scan_complete);
5768         if (err)
5769                 BT_ERR("Failed to run HCI request: err %d", err);
5770 }
5771 
5772 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5773 {
5774         struct bdaddr_list *b;
5775 
5776         list_for_each_entry(b, &hdev->whitelist, list) {
5777                 struct hci_conn *conn;
5778 
5779                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5780                 if (!conn)
5781                         return true;
5782 
5783                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5784                         return true;
5785         }
5786 
5787         return false;
5788 }
5789 
5790 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5791 {
5792         u8 scan;
5793 
5794         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5795                 return;
5796 
5797         if (!hdev_is_powered(hdev))
5798                 return;
5799 
5800         if (mgmt_powering_down(hdev))
5801                 return;
5802 
5803         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5804             disconnected_whitelist_entries(hdev))
5805                 scan = SCAN_PAGE;
5806         else
5807                 scan = SCAN_DISABLED;
5808 
5809         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5810                 return;
5811 
5812         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5813                 scan |= SCAN_INQUIRY;
5814 
5815         if (req)
5816                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5817         else
5818                 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5819 }
5820 
