TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

  1 /*
  2    BlueZ - Bluetooth protocol stack for Linux
  3    Copyright (C) 2000-2001 Qualcomm Incorporated
  4    Copyright (C) 2011 ProFUSION Embedded Systems
  5 
  6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
  7 
  8    This program is free software; you can redistribute it and/or modify
  9    it under the terms of the GNU General Public License version 2 as
 10    published by the Free Software Foundation;
 11 
 12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
 16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
 17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 20 
 21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
 22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
 23    SOFTWARE IS DISCLAIMED.
 24 */
 25 
 26 /* Bluetooth HCI core. */
 27 
 28 #include <linux/export.h>
 29 #include <linux/idr.h>
 30 #include <linux/rfkill.h>
 31 #include <linux/debugfs.h>
 32 #include <linux/crypto.h>
 33 #include <asm/unaligned.h>
 34 
 35 #include <net/bluetooth/bluetooth.h>
 36 #include <net/bluetooth/hci_core.h>
 37 #include <net/bluetooth/l2cap.h>
 38 #include <net/bluetooth/mgmt.h>
 39 
 40 #include "smp.h"
 41 
 42 static void hci_rx_work(struct work_struct *work);
 43 static void hci_cmd_work(struct work_struct *work);
 44 static void hci_tx_work(struct work_struct *work);
 45 
 46 /* HCI device list */
 47 LIST_HEAD(hci_dev_list);
 48 DEFINE_RWLOCK(hci_dev_list_lock);
 49 
 50 /* HCI callback list */
 51 LIST_HEAD(hci_cb_list);
 52 DEFINE_RWLOCK(hci_cb_list_lock);
 53 
 54 /* HCI ID Numbering */
 55 static DEFINE_IDA(hci_index_ida);
 56 
 57 /* ----- HCI requests ----- */
 58 
 59 #define HCI_REQ_DONE      0
 60 #define HCI_REQ_PEND      1
 61 #define HCI_REQ_CANCELED  2
 62 
 63 #define hci_req_lock(d)         mutex_lock(&d->req_lock)
 64 #define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
 65 
 66 /* ---- HCI notifications ---- */
 67 
 68 static void hci_notify(struct hci_dev *hdev, int event)
 69 {
 70         hci_sock_dev_event(hdev, event);
 71 }
 72 
 73 /* ---- HCI debugfs entries ---- */
 74 
 75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
 76                              size_t count, loff_t *ppos)
 77 {
 78         struct hci_dev *hdev = file->private_data;
 79         char buf[3];
 80 
 81         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
 82         buf[1] = '\n';
 83         buf[2] = '\0';
 84         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 85 }
 86 
 87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 88                               size_t count, loff_t *ppos)
 89 {
 90         struct hci_dev *hdev = file->private_data;
 91         struct sk_buff *skb;
 92         char buf[32];
 93         size_t buf_size = min(count, (sizeof(buf)-1));
 94         bool enable;
 95         int err;
 96 
 97         if (!test_bit(HCI_UP, &hdev->flags))
 98                 return -ENETDOWN;
 99 
100         if (copy_from_user(buf, user_buf, buf_size))
101                 return -EFAULT;
102 
103         buf[buf_size] = '\0';
104         if (strtobool(buf, &enable))
105                 return -EINVAL;
106 
107         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108                 return -EALREADY;
109 
110         hci_req_lock(hdev);
111         if (enable)
112                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113                                      HCI_CMD_TIMEOUT);
114         else
115                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116                                      HCI_CMD_TIMEOUT);
117         hci_req_unlock(hdev);
118 
119         if (IS_ERR(skb))
120                 return PTR_ERR(skb);
121 
122         err = -bt_to_errno(skb->data[0]);
123         kfree_skb(skb);
124 
125         if (err < 0)
126                 return err;
127 
128         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
129 
130         return count;
131 }
132 
133 static const struct file_operations dut_mode_fops = {
134         .open           = simple_open,
135         .read           = dut_mode_read,
136         .write          = dut_mode_write,
137         .llseek         = default_llseek,
138 };
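
/*
 * A minimal userspace sketch, not part of hci_core.c, of how the dut_mode
 * attribute above could be toggled once it is exposed in debugfs; the
 * mount point and the hci0 instance name are assumptions.
 */
#include <fcntl.h>
#include <unistd.h>

static int set_dut_mode(int enable)
{
        int fd = open("/sys/kernel/debug/bluetooth/hci0/dut_mode", O_WRONLY);

        if (fd < 0)
                return -1;

        /* dut_mode_write() parses the buffer with strtobool(), so "Y", "y"
         * and "1" enable, while "N", "n" and "0" disable.
         */
        if (write(fd, enable ? "1" : "0", 1) < 0) {
                close(fd);
                return -1;
        }

        return close(fd);
}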
139 
140 static int features_show(struct seq_file *f, void *ptr)
141 {
142         struct hci_dev *hdev = f->private;
143         u8 p;
144 
145         hci_dev_lock(hdev);
146         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149                            hdev->features[p][0], hdev->features[p][1],
150                            hdev->features[p][2], hdev->features[p][3],
151                            hdev->features[p][4], hdev->features[p][5],
152                            hdev->features[p][6], hdev->features[p][7]);
153         }
154         if (lmp_le_capable(hdev))
155                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157                            hdev->le_features[0], hdev->le_features[1],
158                            hdev->le_features[2], hdev->le_features[3],
159                            hdev->le_features[4], hdev->le_features[5],
160                            hdev->le_features[6], hdev->le_features[7]);
161         hci_dev_unlock(hdev);
162 
163         return 0;
164 }
165 
166 static int features_open(struct inode *inode, struct file *file)
167 {
168         return single_open(file, features_show, inode->i_private);
169 }
170 
171 static const struct file_operations features_fops = {
172         .open           = features_open,
173         .read           = seq_read,
174         .llseek         = seq_lseek,
175         .release        = single_release,
176 };
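
/*
 * The read-only entries below all share one seq_file pattern: single_open()
 * stores the hci_dev pointer passed in via inode->i_private as f->private,
 * the show callback emits the whole file in one pass, and seq_read(),
 * seq_lseek() and single_release() supply the remaining file semantics.
 */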
177 
178 static int blacklist_show(struct seq_file *f, void *p)
179 {
180         struct hci_dev *hdev = f->private;
181         struct bdaddr_list *b;
182 
183         hci_dev_lock(hdev);
184         list_for_each_entry(b, &hdev->blacklist, list)
185                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186         hci_dev_unlock(hdev);
187 
188         return 0;
189 }
190 
191 static int blacklist_open(struct inode *inode, struct file *file)
192 {
193         return single_open(file, blacklist_show, inode->i_private);
194 }
195 
196 static const struct file_operations blacklist_fops = {
197         .open           = blacklist_open,
198         .read           = seq_read,
199         .llseek         = seq_lseek,
200         .release        = single_release,
201 };
202 
203 static int whitelist_show(struct seq_file *f, void *p)
204 {
205         struct hci_dev *hdev = f->private;
206         struct bdaddr_list *b;
207 
208         hci_dev_lock(hdev);
209         list_for_each_entry(b, &hdev->whitelist, list)
210                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211         hci_dev_unlock(hdev);
212 
213         return 0;
214 }
215 
216 static int whitelist_open(struct inode *inode, struct file *file)
217 {
218         return single_open(file, whitelist_show, inode->i_private);
219 }
220 
221 static const struct file_operations whitelist_fops = {
222         .open           = whitelist_open,
223         .read           = seq_read,
224         .llseek         = seq_lseek,
225         .release        = single_release,
226 };
227 
228 static int uuids_show(struct seq_file *f, void *p)
229 {
230         struct hci_dev *hdev = f->private;
231         struct bt_uuid *uuid;
232 
233         hci_dev_lock(hdev);
234         list_for_each_entry(uuid, &hdev->uuids, list) {
235                 u8 i, val[16];
236 
 237                 /* The Bluetooth UUID values are stored in little
 238                  * endian order, so reverse the bytes to obtain the
 239                  * big-endian form that the %pUb modifier expects.
 240                  */
241                 for (i = 0; i < 16; i++)
242                         val[i] = uuid->uuid[15 - i];
243 
244                 seq_printf(f, "%pUb\n", val);
245         }
246         hci_dev_unlock(hdev);
247 
248         return 0;
249 }
250 
251 static int uuids_open(struct inode *inode, struct file *file)
252 {
253         return single_open(file, uuids_show, inode->i_private);
254 }
255 
256 static const struct file_operations uuids_fops = {
257         .open           = uuids_open,
258         .read           = seq_read,
259         .llseek         = seq_lseek,
260         .release        = single_release,
261 };
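
/*
 * For example, a UUID equal to the Bluetooth Base UUID is stored as the
 * bytes fb 34 9b 5f 80 00 00 80 00 10 00 00 00 00 00 00; after the
 * reversal above, %pUb prints the canonical
 * 00000000-0000-1000-8000-00805f9b34fb form.
 */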
262 
263 static int inquiry_cache_show(struct seq_file *f, void *p)
264 {
265         struct hci_dev *hdev = f->private;
266         struct discovery_state *cache = &hdev->discovery;
267         struct inquiry_entry *e;
268 
269         hci_dev_lock(hdev);
270 
271         list_for_each_entry(e, &cache->all, all) {
272                 struct inquiry_data *data = &e->data;
273                 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
274                            &data->bdaddr,
275                            data->pscan_rep_mode, data->pscan_period_mode,
276                            data->pscan_mode, data->dev_class[2],
277                            data->dev_class[1], data->dev_class[0],
278                            __le16_to_cpu(data->clock_offset),
279                            data->rssi, data->ssp_mode, e->timestamp);
280         }
281 
282         hci_dev_unlock(hdev);
283 
284         return 0;
285 }
286 
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
288 {
289         return single_open(file, inquiry_cache_show, inode->i_private);
290 }
291 
292 static const struct file_operations inquiry_cache_fops = {
293         .open           = inquiry_cache_open,
294         .read           = seq_read,
295         .llseek         = seq_lseek,
296         .release        = single_release,
297 };
298 
299 static int link_keys_show(struct seq_file *f, void *ptr)
300 {
301         struct hci_dev *hdev = f->private;
302         struct list_head *p, *n;
303 
304         hci_dev_lock(hdev);
305         list_for_each_safe(p, n, &hdev->link_keys) {
306                 struct link_key *key = list_entry(p, struct link_key, list);
307                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
309         }
310         hci_dev_unlock(hdev);
311 
312         return 0;
313 }
314 
315 static int link_keys_open(struct inode *inode, struct file *file)
316 {
317         return single_open(file, link_keys_show, inode->i_private);
318 }
319 
320 static const struct file_operations link_keys_fops = {
321         .open           = link_keys_open,
322         .read           = seq_read,
323         .llseek         = seq_lseek,
324         .release        = single_release,
325 };
326 
327 static int dev_class_show(struct seq_file *f, void *ptr)
328 {
329         struct hci_dev *hdev = f->private;
330 
331         hci_dev_lock(hdev);
332         seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333                    hdev->dev_class[1], hdev->dev_class[0]);
334         hci_dev_unlock(hdev);
335 
336         return 0;
337 }
338 
339 static int dev_class_open(struct inode *inode, struct file *file)
340 {
341         return single_open(file, dev_class_show, inode->i_private);
342 }
343 
344 static const struct file_operations dev_class_fops = {
345         .open           = dev_class_open,
346         .read           = seq_read,
347         .llseek         = seq_lseek,
348         .release        = single_release,
349 };
350 
351 static int voice_setting_get(void *data, u64 *val)
352 {
353         struct hci_dev *hdev = data;
354 
355         hci_dev_lock(hdev);
356         *val = hdev->voice_setting;
357         hci_dev_unlock(hdev);
358 
359         return 0;
360 }
361 
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363                         NULL, "0x%4.4llx\n");
364 
365 static int auto_accept_delay_set(void *data, u64 val)
366 {
367         struct hci_dev *hdev = data;
368 
369         hci_dev_lock(hdev);
370         hdev->auto_accept_delay = val;
371         hci_dev_unlock(hdev);
372 
373         return 0;
374 }
375 
376 static int auto_accept_delay_get(void *data, u64 *val)
377 {
378         struct hci_dev *hdev = data;
379 
380         hci_dev_lock(hdev);
381         *val = hdev->auto_accept_delay;
382         hci_dev_unlock(hdev);
383 
384         return 0;
385 }
386 
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388                         auto_accept_delay_set, "%llu\n");
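
/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates a file_operations whose read handler
 * formats the getter's value with the given template ("%llu\n" here) and
 * whose write handler parses a u64 for the setter. A minimal sketch of how
 * such an attribute is registered; hci_core.c makes an equivalent
 * debugfs_create_file() call in its init path.
 */
static void example_register_auto_accept_delay(struct hci_dev *hdev)
{
        debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, hdev,
                            &auto_accept_delay_fops);
}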
389 
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391                                      size_t count, loff_t *ppos)
392 {
393         struct hci_dev *hdev = file->private_data;
394         char buf[3];
395 
396         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
397         buf[1] = '\n';
398         buf[2] = '\0';
399         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
400 }
401 
402 static ssize_t force_sc_support_write(struct file *file,
403                                       const char __user *user_buf,
404                                       size_t count, loff_t *ppos)
405 {
406         struct hci_dev *hdev = file->private_data;
407         char buf[32];
408         size_t buf_size = min(count, (sizeof(buf)-1));
409         bool enable;
410 
411         if (test_bit(HCI_UP, &hdev->flags))
412                 return -EBUSY;
413 
414         if (copy_from_user(buf, user_buf, buf_size))
415                 return -EFAULT;
416 
417         buf[buf_size] = '\0';
418         if (strtobool(buf, &enable))
419                 return -EINVAL;
420 
421         if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
422                 return -EALREADY;
423 
424         change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
425 
426         return count;
427 }
428 
429 static const struct file_operations force_sc_support_fops = {
430         .open           = simple_open,
431         .read           = force_sc_support_read,
432         .write          = force_sc_support_write,
433         .llseek         = default_llseek,
434 };
435 
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437                                  size_t count, loff_t *ppos)
438 {
439         struct hci_dev *hdev = file->private_data;
440         char buf[3];
441 
442         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443         buf[1] = '\n';
444         buf[2] = '\0';
445         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
446 }
447 
448 static const struct file_operations sc_only_mode_fops = {
449         .open           = simple_open,
450         .read           = sc_only_mode_read,
451         .llseek         = default_llseek,
452 };
453 
454 static int idle_timeout_set(void *data, u64 val)
455 {
456         struct hci_dev *hdev = data;
457 
458         if (val != 0 && (val < 500 || val > 3600000))
459                 return -EINVAL;
460 
461         hci_dev_lock(hdev);
462         hdev->idle_timeout = val;
463         hci_dev_unlock(hdev);
464 
465         return 0;
466 }
467 
468 static int idle_timeout_get(void *data, u64 *val)
469 {
470         struct hci_dev *hdev = data;
471 
472         hci_dev_lock(hdev);
473         *val = hdev->idle_timeout;
474         hci_dev_unlock(hdev);
475 
476         return 0;
477 }
478 
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480                         idle_timeout_set, "%llu\n");
481 
482 static int rpa_timeout_set(void *data, u64 val)
483 {
484         struct hci_dev *hdev = data;
485 
486         /* Require the RPA timeout to be at least 30 seconds and at most
487          * 24 hours.
488          */
489         if (val < 30 || val > (60 * 60 * 24))
490                 return -EINVAL;
491 
492         hci_dev_lock(hdev);
493         hdev->rpa_timeout = val;
494         hci_dev_unlock(hdev);
495 
496         return 0;
497 }
498 
499 static int rpa_timeout_get(void *data, u64 *val)
500 {
501         struct hci_dev *hdev = data;
502 
503         hci_dev_lock(hdev);
504         *val = hdev->rpa_timeout;
505         hci_dev_unlock(hdev);
506 
507         return 0;
508 }
509 
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511                         rpa_timeout_set, "%llu\n");
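
/*
 * A resolvable private address (RPA) is regenerated whenever this timeout
 * expires while LE privacy is in use, so the bounds above keep the address
 * from rotating faster than every 30 seconds or lingering beyond a day.
 */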
512 
513 static int sniff_min_interval_set(void *data, u64 val)
514 {
515         struct hci_dev *hdev = data;
516 
517         if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
518                 return -EINVAL;
519 
520         hci_dev_lock(hdev);
521         hdev->sniff_min_interval = val;
522         hci_dev_unlock(hdev);
523 
524         return 0;
525 }
526 
527 static int sniff_min_interval_get(void *data, u64 *val)
528 {
529         struct hci_dev *hdev = data;
530 
531         hci_dev_lock(hdev);
532         *val = hdev->sniff_min_interval;
533         hci_dev_unlock(hdev);
534 
535         return 0;
536 }
537 
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539                         sniff_min_interval_set, "%llu\n");
540 
541 static int sniff_max_interval_set(void *data, u64 val)
542 {
543         struct hci_dev *hdev = data;
544 
545         if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
546                 return -EINVAL;
547 
548         hci_dev_lock(hdev);
549         hdev->sniff_max_interval = val;
550         hci_dev_unlock(hdev);
551 
552         return 0;
553 }
554 
555 static int sniff_max_interval_get(void *data, u64 *val)
556 {
557         struct hci_dev *hdev = data;
558 
559         hci_dev_lock(hdev);
560         *val = hdev->sniff_max_interval;
561         hci_dev_unlock(hdev);
562 
563         return 0;
564 }
565 
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567                         sniff_max_interval_set, "%llu\n");
568 
569 static int conn_info_min_age_set(void *data, u64 val)
570 {
571         struct hci_dev *hdev = data;
572 
573         if (val == 0 || val > hdev->conn_info_max_age)
574                 return -EINVAL;
575 
576         hci_dev_lock(hdev);
577         hdev->conn_info_min_age = val;
578         hci_dev_unlock(hdev);
579 
580         return 0;
581 }
582 
583 static int conn_info_min_age_get(void *data, u64 *val)
584 {
585         struct hci_dev *hdev = data;
586 
587         hci_dev_lock(hdev);
588         *val = hdev->conn_info_min_age;
589         hci_dev_unlock(hdev);
590 
591         return 0;
592 }
593 
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595                         conn_info_min_age_set, "%llu\n");
596 
597 static int conn_info_max_age_set(void *data, u64 val)
598 {
599         struct hci_dev *hdev = data;
600 
601         if (val == 0 || val < hdev->conn_info_min_age)
602                 return -EINVAL;
603 
604         hci_dev_lock(hdev);
605         hdev->conn_info_max_age = val;
606         hci_dev_unlock(hdev);
607 
608         return 0;
609 }
610 
611 static int conn_info_max_age_get(void *data, u64 *val)
612 {
613         struct hci_dev *hdev = data;
614 
615         hci_dev_lock(hdev);
616         *val = hdev->conn_info_max_age;
617         hci_dev_unlock(hdev);
618 
619         return 0;
620 }
621 
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623                         conn_info_max_age_set, "%llu\n");
624 
625 static int identity_show(struct seq_file *f, void *p)
626 {
627         struct hci_dev *hdev = f->private;
628         bdaddr_t addr;
629         u8 addr_type;
630 
631         hci_dev_lock(hdev);
632 
633         hci_copy_identity_address(hdev, &addr, &addr_type);
634 
635         seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636                    16, hdev->irk, &hdev->rpa);
637 
638         hci_dev_unlock(hdev);
639 
640         return 0;
641 }
642 
643 static int identity_open(struct inode *inode, struct file *file)
644 {
645         return single_open(file, identity_show, inode->i_private);
646 }
647 
648 static const struct file_operations identity_fops = {
649         .open           = identity_open,
650         .read           = seq_read,
651         .llseek         = seq_lseek,
652         .release        = single_release,
653 };
654 
655 static int random_address_show(struct seq_file *f, void *p)
656 {
657         struct hci_dev *hdev = f->private;
658 
659         hci_dev_lock(hdev);
660         seq_printf(f, "%pMR\n", &hdev->random_addr);
661         hci_dev_unlock(hdev);
662 
663         return 0;
664 }
665 
666 static int random_address_open(struct inode *inode, struct file *file)
667 {
668         return single_open(file, random_address_show, inode->i_private);
669 }
670 
671 static const struct file_operations random_address_fops = {
672         .open           = random_address_open,
673         .read           = seq_read,
674         .llseek         = seq_lseek,
675         .release        = single_release,
676 };
677 
678 static int static_address_show(struct seq_file *f, void *p)
679 {
680         struct hci_dev *hdev = f->private;
681 
682         hci_dev_lock(hdev);
683         seq_printf(f, "%pMR\n", &hdev->static_addr);
684         hci_dev_unlock(hdev);
685 
686         return 0;
687 }
688 
689 static int static_address_open(struct inode *inode, struct file *file)
690 {
691         return single_open(file, static_address_show, inode->i_private);
692 }
693 
694 static const struct file_operations static_address_fops = {
695         .open           = static_address_open,
696         .read           = seq_read,
697         .llseek         = seq_lseek,
698         .release        = single_release,
699 };
700 
701 static ssize_t force_static_address_read(struct file *file,
702                                          char __user *user_buf,
703                                          size_t count, loff_t *ppos)
704 {
705         struct hci_dev *hdev = file->private_data;
706         char buf[3];
707 
708         buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
709         buf[1] = '\n';
710         buf[2] = '\0';
711         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
712 }
713 
714 static ssize_t force_static_address_write(struct file *file,
715                                           const char __user *user_buf,
716                                           size_t count, loff_t *ppos)
717 {
718         struct hci_dev *hdev = file->private_data;
719         char buf[32];
720         size_t buf_size = min(count, (sizeof(buf)-1));
721         bool enable;
722 
723         if (test_bit(HCI_UP, &hdev->flags))
724                 return -EBUSY;
725 
726         if (copy_from_user(buf, user_buf, buf_size))
727                 return -EFAULT;
728 
729         buf[buf_size] = '\0';
730         if (strtobool(buf, &enable))
731                 return -EINVAL;
732 
733         if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
734                 return -EALREADY;
735 
736         change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
737 
738         return count;
739 }
740 
741 static const struct file_operations force_static_address_fops = {
742         .open           = simple_open,
743         .read           = force_static_address_read,
744         .write          = force_static_address_write,
745         .llseek         = default_llseek,
746 };
747 
748 static int white_list_show(struct seq_file *f, void *ptr)
749 {
750         struct hci_dev *hdev = f->private;
751         struct bdaddr_list *b;
752 
753         hci_dev_lock(hdev);
754         list_for_each_entry(b, &hdev->le_white_list, list)
755                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756         hci_dev_unlock(hdev);
757 
758         return 0;
759 }
760 
761 static int white_list_open(struct inode *inode, struct file *file)
762 {
763         return single_open(file, white_list_show, inode->i_private);
764 }
765 
766 static const struct file_operations white_list_fops = {
767         .open           = white_list_open,
768         .read           = seq_read,
769         .llseek         = seq_lseek,
770         .release        = single_release,
771 };
772 
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
774 {
775         struct hci_dev *hdev = f->private;
776         struct list_head *p, *n;
777 
778         hci_dev_lock(hdev);
779         list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780                 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781                 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782                            &irk->bdaddr, irk->addr_type,
783                            16, irk->val, &irk->rpa);
784         }
785         hci_dev_unlock(hdev);
786 
787         return 0;
788 }
789 
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
791 {
792         return single_open(file, identity_resolving_keys_show,
793                            inode->i_private);
794 }
795 
796 static const struct file_operations identity_resolving_keys_fops = {
797         .open           = identity_resolving_keys_open,
798         .read           = seq_read,
799         .llseek         = seq_lseek,
800         .release        = single_release,
801 };
802 
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
804 {
805         struct hci_dev *hdev = f->private;
806         struct list_head *p, *n;
807 
808         hci_dev_lock(hdev);
809         list_for_each_safe(p, n, &hdev->long_term_keys) {
810                 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811                 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814                            __le64_to_cpu(ltk->rand), 16, ltk->val);
815         }
816         hci_dev_unlock(hdev);
817 
818         return 0;
819 }
820 
821 static int long_term_keys_open(struct inode *inode, struct file *file)
822 {
823         return single_open(file, long_term_keys_show, inode->i_private);
824 }
825 
826 static const struct file_operations long_term_keys_fops = {
827         .open           = long_term_keys_open,
828         .read           = seq_read,
829         .llseek         = seq_lseek,
830         .release        = single_release,
831 };
832 
833 static int conn_min_interval_set(void *data, u64 val)
834 {
835         struct hci_dev *hdev = data;
836 
837         if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
838                 return -EINVAL;
839 
840         hci_dev_lock(hdev);
841         hdev->le_conn_min_interval = val;
842         hci_dev_unlock(hdev);
843 
844         return 0;
845 }
846 
847 static int conn_min_interval_get(void *data, u64 *val)
848 {
849         struct hci_dev *hdev = data;
850 
851         hci_dev_lock(hdev);
852         *val = hdev->le_conn_min_interval;
853         hci_dev_unlock(hdev);
854 
855         return 0;
856 }
857 
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859                         conn_min_interval_set, "%llu\n");
860 
861 static int conn_max_interval_set(void *data, u64 val)
862 {
863         struct hci_dev *hdev = data;
864 
865         if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
866                 return -EINVAL;
867 
868         hci_dev_lock(hdev);
869         hdev->le_conn_max_interval = val;
870         hci_dev_unlock(hdev);
871 
872         return 0;
873 }
874 
875 static int conn_max_interval_get(void *data, u64 *val)
876 {
877         struct hci_dev *hdev = data;
878 
879         hci_dev_lock(hdev);
880         *val = hdev->le_conn_max_interval;
881         hci_dev_unlock(hdev);
882 
883         return 0;
884 }
885 
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887                         conn_max_interval_set, "%llu\n");
888 
889 static int conn_latency_set(void *data, u64 val)
890 {
891         struct hci_dev *hdev = data;
892 
893         if (val > 0x01f3)
894                 return -EINVAL;
895 
896         hci_dev_lock(hdev);
897         hdev->le_conn_latency = val;
898         hci_dev_unlock(hdev);
899 
900         return 0;
901 }
902 
903 static int conn_latency_get(void *data, u64 *val)
904 {
905         struct hci_dev *hdev = data;
906 
907         hci_dev_lock(hdev);
908         *val = hdev->le_conn_latency;
909         hci_dev_unlock(hdev);
910 
911         return 0;
912 }
913 
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915                         conn_latency_set, "%llu\n");
916 
917 static int supervision_timeout_set(void *data, u64 val)
918 {
919         struct hci_dev *hdev = data;
920 
921         if (val < 0x000a || val > 0x0c80)
922                 return -EINVAL;
923 
924         hci_dev_lock(hdev);
925         hdev->le_supv_timeout = val;
926         hci_dev_unlock(hdev);
927 
928         return 0;
929 }
930 
931 static int supervision_timeout_get(void *data, u64 *val)
932 {
933         struct hci_dev *hdev = data;
934 
935         hci_dev_lock(hdev);
936         *val = hdev->le_supv_timeout;
937         hci_dev_unlock(hdev);
938 
939         return 0;
940 }
941 
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943                         supervision_timeout_set, "%llu\n");
944 
945 static int adv_channel_map_set(void *data, u64 val)
946 {
947         struct hci_dev *hdev = data;
948 
949         if (val < 0x01 || val > 0x07)
950                 return -EINVAL;
951 
952         hci_dev_lock(hdev);
953         hdev->le_adv_channel_map = val;
954         hci_dev_unlock(hdev);
955 
956         return 0;
957 }
958 
959 static int adv_channel_map_get(void *data, u64 *val)
960 {
961         struct hci_dev *hdev = data;
962 
963         hci_dev_lock(hdev);
964         *val = hdev->le_adv_channel_map;
965         hci_dev_unlock(hdev);
966 
967         return 0;
968 }
969 
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971                         adv_channel_map_set, "%llu\n");
972 
973 static int adv_min_interval_set(void *data, u64 val)
974 {
975         struct hci_dev *hdev = data;
976 
977         if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
978                 return -EINVAL;
979 
980         hci_dev_lock(hdev);
981         hdev->le_adv_min_interval = val;
982         hci_dev_unlock(hdev);
983 
984         return 0;
985 }
986 
987 static int adv_min_interval_get(void *data, u64 *val)
988 {
989         struct hci_dev *hdev = data;
990 
991         hci_dev_lock(hdev);
992         *val = hdev->le_adv_min_interval;
993         hci_dev_unlock(hdev);
994 
995         return 0;
996 }
997 
998 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999                         adv_min_interval_set, "%llu\n");
1000 
1001 static int adv_max_interval_set(void *data, u64 val)
1002 {
1003         struct hci_dev *hdev = data;
1004 
1005         if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1006                 return -EINVAL;
1007 
1008         hci_dev_lock(hdev);
1009         hdev->le_adv_max_interval = val;
1010         hci_dev_unlock(hdev);
1011 
1012         return 0;
1013 }
1014 
1015 static int adv_max_interval_get(void *data, u64 *val)
1016 {
1017         struct hci_dev *hdev = data;
1018 
1019         hci_dev_lock(hdev);
1020         *val = hdev->le_adv_max_interval;
1021         hci_dev_unlock(hdev);
1022 
1023         return 0;
1024 }
1025 
1026 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027                         adv_max_interval_set, "%llu\n");
1028 
1029 static int device_list_show(struct seq_file *f, void *ptr)
1030 {
1031         struct hci_dev *hdev = f->private;
1032         struct hci_conn_params *p;
1033 
1034         hci_dev_lock(hdev);
1035         list_for_each_entry(p, &hdev->le_conn_params, list) {
1036                 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1037                            p->auto_connect);
1038         }
1039         hci_dev_unlock(hdev);
1040 
1041         return 0;
1042 }
1043 
1044 static int device_list_open(struct inode *inode, struct file *file)
1045 {
1046         return single_open(file, device_list_show, inode->i_private);
1047 }
1048 
1049 static const struct file_operations device_list_fops = {
1050         .open           = device_list_open,
1051         .read           = seq_read,
1052         .llseek         = seq_lseek,
1053         .release        = single_release,
1054 };
1055 
1056 /* ---- HCI requests ---- */
1057 
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1059 {
1060         BT_DBG("%s result 0x%2.2x", hdev->name, result);
1061 
1062         if (hdev->req_status == HCI_REQ_PEND) {
1063                 hdev->req_result = result;
1064                 hdev->req_status = HCI_REQ_DONE;
1065                 wake_up_interruptible(&hdev->req_wait_q);
1066         }
1067 }
1068 
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1070 {
1071         BT_DBG("%s err 0x%2.2x", hdev->name, err);
1072 
1073         if (hdev->req_status == HCI_REQ_PEND) {
1074                 hdev->req_result = err;
1075                 hdev->req_status = HCI_REQ_CANCELED;
1076                 wake_up_interruptible(&hdev->req_wait_q);
1077         }
1078 }
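
/*
 * Both paths above use the same handshake: the waiter sets req_status to
 * HCI_REQ_PEND and sleeps on req_wait_q, and whoever resolves the request
 * stores the outcome in req_result, updates req_status and wakes the
 * queue (see __hci_req_sync() and __hci_cmd_sync_ev() below).
 */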
1079 
1080 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1081                                             u8 event)
1082 {
1083         struct hci_ev_cmd_complete *ev;
1084         struct hci_event_hdr *hdr;
1085         struct sk_buff *skb;
1086 
1087         hci_dev_lock(hdev);
1088 
1089         skb = hdev->recv_evt;
1090         hdev->recv_evt = NULL;
1091 
1092         hci_dev_unlock(hdev);
1093 
1094         if (!skb)
1095                 return ERR_PTR(-ENODATA);
1096 
1097         if (skb->len < sizeof(*hdr)) {
1098                 BT_ERR("Too short HCI event");
1099                 goto failed;
1100         }
1101 
1102         hdr = (void *) skb->data;
1103         skb_pull(skb, HCI_EVENT_HDR_SIZE);
1104 
1105         if (event) {
1106                 if (hdr->evt != event)
1107                         goto failed;
1108                 return skb;
1109         }
1110 
1111         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1112                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1113                 goto failed;
1114         }
1115 
1116         if (skb->len < sizeof(*ev)) {
1117                 BT_ERR("Too short cmd_complete event");
1118                 goto failed;
1119         }
1120 
1121         ev = (void *) skb->data;
1122         skb_pull(skb, sizeof(*ev));
1123 
1124         if (opcode == __le16_to_cpu(ev->opcode))
1125                 return skb;
1126 
1127         BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
1128                __le16_to_cpu(ev->opcode));
1129 
1130 failed:
1131         kfree_skb(skb);
1132         return ERR_PTR(-ENODATA);
1133 }
1134 
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136                                   const void *param, u8 event, u32 timeout)
1137 {
1138         DECLARE_WAITQUEUE(wait, current);
1139         struct hci_request req;
1140         int err = 0;
1141 
1142         BT_DBG("%s", hdev->name);
1143 
1144         hci_req_init(&req, hdev);
1145 
1146         hci_req_add_ev(&req, opcode, plen, param, event);
1147 
1148         hdev->req_status = HCI_REQ_PEND;
1149 
1150         err = hci_req_run(&req, hci_req_sync_complete);
1151         if (err < 0)
1152                 return ERR_PTR(err);
1153 
1154         add_wait_queue(&hdev->req_wait_q, &wait);
1155         set_current_state(TASK_INTERRUPTIBLE);
1156 
1157         schedule_timeout(timeout);
1158 
1159         remove_wait_queue(&hdev->req_wait_q, &wait);
1160 
1161         if (signal_pending(current))
1162                 return ERR_PTR(-EINTR);
1163 
1164         switch (hdev->req_status) {
1165         case HCI_REQ_DONE:
1166                 err = -bt_to_errno(hdev->req_result);
1167                 break;
1168 
1169         case HCI_REQ_CANCELED:
1170                 err = -hdev->req_result;
1171                 break;
1172 
1173         default:
1174                 err = -ETIMEDOUT;
1175                 break;
1176         }
1177 
1178         hdev->req_status = hdev->req_result = 0;
1179 
1180         BT_DBG("%s end: err %d", hdev->name, err);
1181 
1182         if (err < 0)
1183                 return ERR_PTR(err);
1184 
1185         return hci_get_cmd_complete(hdev, opcode, event);
1186 }
1187 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1188 
1189 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1190                                const void *param, u32 timeout)
1191 {
1192         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1193 }
1194 EXPORT_SYMBOL(__hci_cmd_sync);
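
/*
 * A minimal sketch, not part of hci_core.c, of how a driver might use the
 * exported __hci_cmd_sync() helper; the 0xfc01 vendor opcode is purely
 * hypothetical.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        struct sk_buff *skb;
        int err;

        skb = __hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* Command Complete parameters begin with a status byte */
        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        return err;
}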
1195 
1196 /* Execute request and wait for completion. */
1197 static int __hci_req_sync(struct hci_dev *hdev,
1198                           void (*func)(struct hci_request *req,
1199                                       unsigned long opt),
1200                           unsigned long opt, __u32 timeout)
1201 {
1202         struct hci_request req;
1203         DECLARE_WAITQUEUE(wait, current);
1204         int err = 0;
1205 
1206         BT_DBG("%s start", hdev->name);
1207 
1208         hci_req_init(&req, hdev);
1209 
1210         hdev->req_status = HCI_REQ_PEND;
1211 
1212         func(&req, opt);
1213 
1214         err = hci_req_run(&req, hci_req_sync_complete);
1215         if (err < 0) {
1216                 hdev->req_status = 0;
1217 
1218                 /* ENODATA means the HCI request command queue is empty.
1219                  * This can happen when a request with conditionals doesn't
1220                  * trigger any commands to be sent. This is normal behavior
1221                  * and should not trigger an error return.
1222                  */
1223                 if (err == -ENODATA)
1224                         return 0;
1225 
1226                 return err;
1227         }
1228 
1229         add_wait_queue(&hdev->req_wait_q, &wait);
1230         set_current_state(TASK_INTERRUPTIBLE);
1231 
1232         schedule_timeout(timeout);
1233 
1234         remove_wait_queue(&hdev->req_wait_q, &wait);
1235 
1236         if (signal_pending(current))
1237                 return -EINTR;
1238 
1239         switch (hdev->req_status) {
1240         case HCI_REQ_DONE:
1241                 err = -bt_to_errno(hdev->req_result);
1242                 break;
1243 
1244         case HCI_REQ_CANCELED:
1245                 err = -hdev->req_result;
1246                 break;
1247 
1248         default:
1249                 err = -ETIMEDOUT;
1250                 break;
1251         }
1252 
1253         hdev->req_status = hdev->req_result = 0;
1254 
1255         BT_DBG("%s end: err %d", hdev->name, err);
1256 
1257         return err;
1258 }
1259 
1260 static int hci_req_sync(struct hci_dev *hdev,
1261                         void (*req)(struct hci_request *req,
1262                                     unsigned long opt),
1263                         unsigned long opt, __u32 timeout)
1264 {
1265         int ret;
1266 
1267         if (!test_bit(HCI_UP, &hdev->flags))
1268                 return -ENETDOWN;
1269 
1270         /* Serialize all requests */
1271         hci_req_lock(hdev);
1272         ret = __hci_req_sync(hdev, req, opt, timeout);
1273         hci_req_unlock(hdev);
1274 
1275         return ret;
1276 }
1277 
1278 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1279 {
1280         BT_DBG("%s %ld", req->hdev->name, opt);
1281 
1282         /* Reset device */
1283         set_bit(HCI_RESET, &req->hdev->flags);
1284         hci_req_add(req, HCI_OP_RESET, 0, NULL);
1285 }
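
/*
 * A minimal sketch of how the synchronous machinery is used: build the
 * request in a callback such as hci_reset_req() above and let
 * hci_req_sync() sleep until the controller answers or the timeout fires.
 */
static int example_sync_reset(struct hci_dev *hdev)
{
        return hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
}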
1286 
1287 static void bredr_init(struct hci_request *req)
1288 {
1289         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1290 
1291         /* Read Local Supported Features */
1292         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1293 
1294         /* Read Local Version */
1295         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1296 
1297         /* Read BD Address */
1298         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1299 }
1300 
1301 static void amp_init(struct hci_request *req)
1302 {
1303         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1304 
1305         /* Read Local Version */
1306         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1307 
1308         /* Read Local Supported Commands */
1309         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1310 
1311         /* Read Local Supported Features */
1312         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1313 
1314         /* Read Local AMP Info */
1315         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1316 
1317         /* Read Data Blk size */
1318         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1319 
1320         /* Read Flow Control Mode */
1321         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1322 
1323         /* Read Location Data */
1324         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1325 }
1326 
1327 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1328 {
1329         struct hci_dev *hdev = req->hdev;
1330 
1331         BT_DBG("%s %ld", hdev->name, opt);
1332 
1333         /* Reset */
1334         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1335                 hci_reset_req(req, 0);
1336 
1337         switch (hdev->dev_type) {
1338         case HCI_BREDR:
1339                 bredr_init(req);
1340                 break;
1341 
1342         case HCI_AMP:
1343                 amp_init(req);
1344                 break;
1345 
1346         default:
1347                 BT_ERR("Unknown device type %d", hdev->dev_type);
1348                 break;
1349         }
1350 }
1351 
1352 static void bredr_setup(struct hci_request *req)
1353 {
1354         struct hci_dev *hdev = req->hdev;
1355 
1356         __le16 param;
1357         __u8 flt_type;
1358 
1359         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1360         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1361 
1362         /* Read Class of Device */
1363         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1364 
1365         /* Read Local Name */
1366         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1367 
1368         /* Read Voice Setting */
1369         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1370 
1371         /* Read Number of Supported IAC */
1372         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1373 
1374         /* Read Current IAC LAP */
1375         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1376 
1377         /* Clear Event Filters */
1378         flt_type = HCI_FLT_CLEAR_ALL;
1379         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1380 
1381         /* Connection accept timeout ~20 secs */
1382         param = cpu_to_le16(0x7d00);
1383         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1384 
1385         /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1386          * but it does not support page scan related HCI commands.
1387          */
1388         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1389                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1390                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1391         }
1392 }
1393 
1394 static void le_setup(struct hci_request *req)
1395 {
1396         struct hci_dev *hdev = req->hdev;
1397 
1398         /* Read LE Buffer Size */
1399         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1400 
1401         /* Read LE Local Supported Features */
1402         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1403 
1404         /* Read LE Supported States */
1405         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1406 
1407         /* Read LE White List Size */
1408         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1409 
1410         /* Clear LE White List */
1411         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1412 
1413         /* LE-only controllers have LE implicitly enabled */
1414         if (!lmp_bredr_capable(hdev))
1415                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1416 }
1417 
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1419 {
1420         if (lmp_ext_inq_capable(hdev))
1421                 return 0x02;
1422 
1423         if (lmp_inq_rssi_capable(hdev))
1424                 return 0x01;
1425 
1426         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427             hdev->lmp_subver == 0x0757)
1428                 return 0x01;
1429 
1430         if (hdev->manufacturer == 15) {
1431                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432                         return 0x01;
1433                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434                         return 0x01;
1435                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436                         return 0x01;
1437         }
1438 
1439         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440             hdev->lmp_subver == 0x1805)
1441                 return 0x01;
1442 
1443         return 0x00;
1444 }
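
/*
 * Inquiry mode 0x02 requests Extended Inquiry Result events, 0x01 Inquiry
 * Result with RSSI, and 0x00 the standard format. The manufacturer checks
 * above whitelist controllers whose RSSI support is not reflected in their
 * feature bits; going by the Bluetooth SIG company identifier list, IDs 11,
 * 15 and 31 appear to be Silicon Wave, Broadcom and AVM Berlin (the last is
 * also called out by name elsewhere in this file).
 */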
1445 
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1447 {
1448         u8 mode;
1449 
1450         mode = hci_get_inquiry_mode(req->hdev);
1451 
1452         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1453 }
1454 
1455 static void hci_setup_event_mask(struct hci_request *req)
1456 {
1457         struct hci_dev *hdev = req->hdev;
1458 
1459         /* The second byte is 0xff instead of 0x9f (two reserved bits
1460          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461          * command otherwise.
1462          */
1463         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1464 
1465         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1466          * any event mask for pre 1.2 devices.
1467          */
1468         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469                 return;
1470 
1471         if (lmp_bredr_capable(hdev)) {
1472                 events[4] |= 0x01; /* Flow Specification Complete */
1473                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475                 events[5] |= 0x08; /* Synchronous Connection Complete */
1476                 events[5] |= 0x10; /* Synchronous Connection Changed */
1477         } else {
1478                 /* Use a different default for LE-only devices */
1479                 memset(events, 0, sizeof(events));
1480                 events[0] |= 0x10; /* Disconnection Complete */
1481                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482                 events[1] |= 0x20; /* Command Complete */
1483                 events[1] |= 0x40; /* Command Status */
1484                 events[1] |= 0x80; /* Hardware Error */
1485                 events[2] |= 0x04; /* Number of Completed Packets */
1486                 events[3] |= 0x02; /* Data Buffer Overflow */
1487 
1488                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489                         events[0] |= 0x80; /* Encryption Change */
1490                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491                 }
1492         }
1493 
1494         if (lmp_inq_rssi_capable(hdev))
1495                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496 
1497         if (lmp_sniffsubr_capable(hdev))
1498                 events[5] |= 0x20; /* Sniff Subrating */
1499 
1500         if (lmp_pause_enc_capable(hdev))
1501                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1502 
1503         if (lmp_ext_inq_capable(hdev))
1504                 events[5] |= 0x40; /* Extended Inquiry Result */
1505 
1506         if (lmp_no_flush_capable(hdev))
1507                 events[7] |= 0x01; /* Enhanced Flush Complete */
1508 
1509         if (lmp_lsto_capable(hdev))
1510                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1511 
1512         if (lmp_ssp_capable(hdev)) {
1513                 events[6] |= 0x01;      /* IO Capability Request */
1514                 events[6] |= 0x02;      /* IO Capability Response */
1515                 events[6] |= 0x04;      /* User Confirmation Request */
1516                 events[6] |= 0x08;      /* User Passkey Request */
1517                 events[6] |= 0x10;      /* Remote OOB Data Request */
1518                 events[6] |= 0x20;      /* Simple Pairing Complete */
1519                 events[7] |= 0x04;      /* User Passkey Notification */
1520                 events[7] |= 0x08;      /* Keypress Notification */
1521                 events[7] |= 0x10;      /* Remote Host Supported
1522                                          * Features Notification
1523                                          */
1524         }
1525 
1526         if (lmp_le_capable(hdev))
1527                 events[7] |= 0x20;      /* LE Meta-Event */
1528 
1529         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1530 }
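
/*
 * In the event mask, bit n of events[m] enables the event with mask bit
 * index 8 * m + n; events[4] |= 0x01 above, for instance, sets bit 32,
 * Flow Specification Complete.
 */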
1531 
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1533 {
1534         struct hci_dev *hdev = req->hdev;
1535 
1536         if (lmp_bredr_capable(hdev))
1537                 bredr_setup(req);
1538         else
1539                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1540 
1541         if (lmp_le_capable(hdev))
1542                 le_setup(req);
1543 
1544         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545          * local supported commands HCI command.
1546          */
1547         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1549 
1550         if (lmp_ssp_capable(hdev)) {
1551                 /* When SSP is available, the host features page
1552                  * should be available as well. However, some
1553                  * controllers list max_page as 0 as long as SSP
1554                  * has not been enabled. To achieve proper debugging
1555                  * output, force max_page to at least 1.
1556                  */
1557                 hdev->max_page = 0x01;
1558 
1559                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560                         u8 mode = 0x01;
1561                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562                                     sizeof(mode), &mode);
1563                 } else {
1564                         struct hci_cp_write_eir cp;
1565 
1566                         memset(hdev->eir, 0, sizeof(hdev->eir));
1567                         memset(&cp, 0, sizeof(cp));
1568 
1569                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1570                 }
1571         }
1572 
1573         if (lmp_inq_rssi_capable(hdev))
1574                 hci_setup_inquiry_mode(req);
1575 
1576         if (lmp_inq_tx_pwr_capable(hdev))
1577                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1578 
1579         if (lmp_ext_feat_capable(hdev)) {
1580                 struct hci_cp_read_local_ext_features cp;
1581 
1582                 cp.page = 0x01;
1583                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584                             sizeof(cp), &cp);
1585         }
1586 
1587         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588                 u8 enable = 1;
1589                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590                             &enable);
1591         }
1592 }
1593 
1594 static void hci_setup_link_policy(struct hci_request *req)
1595 {
1596         struct hci_dev *hdev = req->hdev;
1597         struct hci_cp_write_def_link_policy cp;
1598         u16 link_policy = 0;
1599 
1600         if (lmp_rswitch_capable(hdev))
1601                 link_policy |= HCI_LP_RSWITCH;
1602         if (lmp_hold_capable(hdev))
1603                 link_policy |= HCI_LP_HOLD;
1604         if (lmp_sniff_capable(hdev))
1605                 link_policy |= HCI_LP_SNIFF;
1606         if (lmp_park_capable(hdev))
1607                 link_policy |= HCI_LP_PARK;
1608 
1609         cp.policy = cpu_to_le16(link_policy);
1610         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1611 }
1612 
1613 static void hci_set_le_support(struct hci_request *req)
1614 {
1615         struct hci_dev *hdev = req->hdev;
1616         struct hci_cp_write_le_host_supported cp;
1617 
1618         /* LE-only devices do not support explicit enablement */
1619         if (!lmp_bredr_capable(hdev))
1620                 return;
1621 
1622         memset(&cp, 0, sizeof(cp));
1623 
1624         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625                 cp.le = 0x01;
1626                 cp.simul = 0x00;
1627         }
1628 
1629         if (cp.le != lmp_host_le_capable(hdev))
1630                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631                             &cp);
1632 }
1633 
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1635 {
1636         struct hci_dev *hdev = req->hdev;
1637         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638         bool changed = false;
1639 
1640         /* If Connectionless Slave Broadcast master role is supported,
1641          * enable all necessary events for it.
1642          */
1643         if (lmp_csb_master_capable(hdev)) {
1644                 events[1] |= 0x40;      /* Triggered Clock Capture */
1645                 events[1] |= 0x80;      /* Synchronization Train Complete */
1646                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1647                 events[2] |= 0x20;      /* CSB Channel Map Change */
1648                 changed = true;
1649         }
1650 
1651         /* If Connectionless Slave Broadcast slave role is supported,
1652          * enable all necessary events for it.
1653          */
1654         if (lmp_csb_slave_capable(hdev)) {
1655                 events[2] |= 0x01;      /* Synchronization Train Received */
1656                 events[2] |= 0x02;      /* CSB Receive */
1657                 events[2] |= 0x04;      /* CSB Timeout */
1658                 events[2] |= 0x08;      /* Truncated Page Complete */
1659                 changed = true;
1660         }
1661 
1662         /* Enable Authenticated Payload Timeout Expired event if supported */
1663         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
1664                 events[2] |= 0x80;
1665                 changed = true;
1666         }
1667 
1668         /* Some Broadcom based controllers indicate support for Set Event
1669          * Mask Page 2 command, but then actually do not support it. Since
1670          * the default value is all bits set to zero, the command is only
1671          * required if the event mask has to be changed. In case no change
1672          * to the event mask is needed, skip this command.
1673          */
1674         if (changed)
1675                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
1676                             sizeof(events), events);
1677 }
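     /* The event mask is a little-endian bit field: event bit N lives in
      * events[N / 8] as 1 << (N % 8), so e.g. Synchronization Train
      * Complete is bit 15 (byte 1, mask 0x80). A hypothetical helper in
      * this vein would express the assignments above more directly:
      *
      *	static inline void hci_events_set_bit(u8 events[8], unsigned int n)
      *	{
      *		events[n / 8] |= 1 << (n % 8);
      *	}
      */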
1678 
1679 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1680 {
1681         struct hci_dev *hdev = req->hdev;
1682         u8 p;
1683 
1684         hci_setup_event_mask(req);
1685 
1686         /* Some Broadcom based Bluetooth controllers do not support the
1687          * Delete Stored Link Key command. They clearly indicate its
1688          * absence in the bit mask of supported commands.
1689          *
1690          * Check the supported commands and send this command only if it
1691          * is marked as supported. If it is not supported, assume that
1692          * the controller has no actual support for stored link keys,
1693          * which makes the command redundant anyway.
1694          *
1695          * Some controllers claim to support deleting stored link keys,
1696          * but actually do not. The quirk lets a driver simply disable
1697          * this command.
1698          */
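             /* The commands[] array mirrors the Read Local Supported
              * Commands bit field, so commands[6] & 0x80 is octet 6 bit 7,
              * the Delete Stored Link Key slot, and commands[5] & 0x10
              * below is the Write Default Link Policy Settings slot.
              */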
1699         if (hdev->commands[6] & 0x80 &&
1700             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1701                 struct hci_cp_delete_stored_link_key cp;
1702 
1703                 bacpy(&cp.bdaddr, BDADDR_ANY);
1704                 cp.delete_all = 0x01;
1705                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1706                             sizeof(cp), &cp);
1707         }
1708 
1709         if (hdev->commands[5] & 0x10)
1710                 hci_setup_link_policy(req);
1711 
1712         if (lmp_le_capable(hdev)) {
1713                 u8 events[8];
1714 
1715                 memset(events, 0, sizeof(events));
1716                 events[0] = 0x0f;
1717 
1718                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1719                         events[0] |= 0x10;      /* LE Long Term Key Request */
1720 
1721                 /* If controller supports the Connection Parameters Request
1722                  * Link Layer Procedure, enable the corresponding event.
1723                  */
1724                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1725                         events[0] |= 0x20;      /* LE Remote Connection
1726                                                  * Parameter Request
1727                                                  */
1728 
1729                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1730                             events);
1731 
1732                 if (hdev->commands[25] & 0x40) {
1733                         /* Read LE Advertising Channel TX Power */
1734                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1735                 }
1736 
1737                 hci_set_le_support(req);
1738         }
1739 
1740         /* Read features beyond page 1 if available */
1741         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1742                 struct hci_cp_read_local_ext_features cp;
1743 
1744                 cp.page = p;
1745                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1746                             sizeof(cp), &cp);
1747         }
1748 }
1749 
1750 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1751 {
1752         struct hci_dev *hdev = req->hdev;
1753 
1754         /* Set event mask page 2 if the HCI command for it is supported */
1755         if (hdev->commands[22] & 0x04)
1756                 hci_set_event_mask_page_2(req);
1757 
1758         /* Read local codec list if the HCI command is supported */
1759         if (hdev->commands[29] & 0x20)
1760                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1761 
1762         /* Get MWS transport configuration if the HCI command is supported */
1763         if (hdev->commands[30] & 0x08)
1764                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1765 
1766         /* Check for Synchronization Train support */
1767         if (lmp_sync_train_capable(hdev))
1768                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1769 
1770         /* Enable Secure Connections if supported and configured */
1771         if ((lmp_sc_capable(hdev) ||
1772              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1773             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1774                 u8 support = 0x01;
1775                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1776                             sizeof(support), &support);
1777         }
1778 }
1779 
1780 static int __hci_init(struct hci_dev *hdev)
1781 {
1782         int err;
1783 
1784         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1785         if (err < 0)
1786                 return err;
1787 
1788         /* The Device Under Test (DUT) mode is special and available for
1789          * all controller types. So just create it early on.
1790          */
1791         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1792                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1793                                     &dut_mode_fops);
1794         }
1795 
1796         /* The HCI_BREDR device type covers single-mode LE, single-mode
1797          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1798          * only need the first stage init.
1799          */
1800         if (hdev->dev_type != HCI_BREDR)
1801                 return 0;
1802 
1803         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1804         if (err < 0)
1805                 return err;
1806 
1807         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1808         if (err < 0)
1809                 return err;
1810 
1811         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1812         if (err < 0)
1813                 return err;
1814 
1815         /* Only create debugfs entries during the initial setup
1816          * phase and not every time the controller gets powered on.
1817          */
1818         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1819                 return 0;
1820 
1821         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1822                             &features_fops);
1823         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1824                            &hdev->manufacturer);
1825         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1826         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1827         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1828                             &blacklist_fops);
1829         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1830                             &whitelist_fops);
1831         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1832 
1833         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1834                             &conn_info_min_age_fops);
1835         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1836                             &conn_info_max_age_fops);
1837 
1838         if (lmp_bredr_capable(hdev)) {
1839                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1840                                     hdev, &inquiry_cache_fops);
1841                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1842                                     hdev, &link_keys_fops);
1843                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1844                                     hdev, &dev_class_fops);
1845                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1846                                     hdev, &voice_setting_fops);
1847         }
1848 
1849         if (lmp_ssp_capable(hdev)) {
1850                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1851                                     hdev, &auto_accept_delay_fops);
1852                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1853                                     hdev, &force_sc_support_fops);
1854                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1855                                     hdev, &sc_only_mode_fops);
1856         }
1857 
1858         if (lmp_sniff_capable(hdev)) {
1859                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1860                                     hdev, &idle_timeout_fops);
1861                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1862                                     hdev, &sniff_min_interval_fops);
1863                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1864                                     hdev, &sniff_max_interval_fops);
1865         }
1866 
1867         if (lmp_le_capable(hdev)) {
1868                 debugfs_create_file("identity", 0400, hdev->debugfs,
1869                                     hdev, &identity_fops);
1870                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1871                                     hdev, &rpa_timeout_fops);
1872                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1873                                     hdev, &random_address_fops);
1874                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1875                                     hdev, &static_address_fops);
1876 
1877                 /* For controllers with a public address, provide a debug
1878                  * option to force the usage of the configured static
1879                  * address. By default the public address is used.
1880                  */
1881                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1882                         debugfs_create_file("force_static_address", 0644,
1883                                             hdev->debugfs, hdev,
1884                                             &force_static_address_fops);
1885 
1886                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1887                                   &hdev->le_white_list_size);
1888                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1889                                     &white_list_fops);
1890                 debugfs_create_file("identity_resolving_keys", 0400,
1891                                     hdev->debugfs, hdev,
1892                                     &identity_resolving_keys_fops);
1893                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1894                                     hdev, &long_term_keys_fops);
1895                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1896                                     hdev, &conn_min_interval_fops);
1897                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1898                                     hdev, &conn_max_interval_fops);
1899                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1900                                     hdev, &conn_latency_fops);
1901                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1902                                     hdev, &supervision_timeout_fops);
1903                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1904                                     hdev, &adv_channel_map_fops);
1905                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1906                                     hdev, &adv_min_interval_fops);
1907                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1908                                     hdev, &adv_max_interval_fops);
1909                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1910                                     &device_list_fops);
1911                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1912                                    hdev->debugfs,
1913                                    &hdev->discov_interleaved_timeout);
1914 
1915                 smp_register(hdev);
1916         }
1917 
1918         return 0;
1919 }
1920 
1921 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1922 {
1923         struct hci_dev *hdev = req->hdev;
1924 
1925         BT_DBG("%s %ld", hdev->name, opt);
1926 
1927         /* Reset */
1928         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1929                 hci_reset_req(req, 0);
1930 
1931         /* Read Local Version */
1932         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1933 
1934         /* Read BD Address */
1935         if (hdev->set_bdaddr)
1936                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1937 }
1938 
1939 static int __hci_unconf_init(struct hci_dev *hdev)
1940 {
1941         int err;
1942 
1943         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1944                 return 0;
1945 
1946         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1947         if (err < 0)
1948                 return err;
1949 
1950         return 0;
1951 }
1952 
1953 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1954 {
1955         __u8 scan = opt;
1956 
1957         BT_DBG("%s %x", req->hdev->name, scan);
1958 
1959         /* Inquiry and Page scans */
1960         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1961 }
1962 
1963 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1964 {
1965         __u8 auth = opt;
1966 
1967         BT_DBG("%s %x", req->hdev->name, auth);
1968 
1969         /* Authentication */
1970         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1971 }
1972 
1973 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1974 {
1975         __u8 encrypt = opt;
1976 
1977         BT_DBG("%s %x", req->hdev->name, encrypt);
1978 
1979         /* Encryption */
1980         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1981 }
1982 
1983 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1984 {
1985         __le16 policy = cpu_to_le16(opt);
1986 
1987         BT_DBG("%s %x", req->hdev->name, policy);
1988 
1989         /* Default link policy */
1990         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1991 }
1992 
1993 /* Get HCI device by index.
1994  * Device is held on return. */
1995 struct hci_dev *hci_dev_get(int index)
1996 {
1997         struct hci_dev *hdev = NULL, *d;
1998 
1999         BT_DBG("%d", index);
2000 
2001         if (index < 0)
2002                 return NULL;
2003 
2004         read_lock(&hci_dev_list_lock);
2005         list_for_each_entry(d, &hci_dev_list, list) {
2006                 if (d->id == index) {
2007                         hdev = hci_dev_hold(d);
2008                         break;
2009                 }
2010         }
2011         read_unlock(&hci_dev_list_lock);
2012         return hdev;
2013 }
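     /* Typical usage (sketch): the reference taken via hci_dev_hold()
      * above must be balanced with hci_dev_put() by the caller:
      *
      *	struct hci_dev *hdev = hci_dev_get(0);
      *
      *	if (hdev) {
      *		BT_DBG("%s found", hdev->name);
      *		hci_dev_put(hdev);
      *	}
      */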
2014 
2015 /* ---- Inquiry support ---- */
2016 
2017 bool hci_discovery_active(struct hci_dev *hdev)
2018 {
2019         struct discovery_state *discov = &hdev->discovery;
2020 
2021         switch (discov->state) {
2022         case DISCOVERY_FINDING:
2023         case DISCOVERY_RESOLVING:
2024                 return true;
2025 
2026         default:
2027                 return false;
2028         }
2029 }
2030 
2031 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2032 {
2033         int old_state = hdev->discovery.state;
2034 
2035         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2036 
2037         if (old_state == state)
2038                 return;
2039 
2040         hdev->discovery.state = state;
2041 
2042         switch (state) {
2043         case DISCOVERY_STOPPED:
2044                 hci_update_background_scan(hdev);
2045 
2046                 if (old_state != DISCOVERY_STARTING)
2047                         mgmt_discovering(hdev, 0);
2048                 break;
2049         case DISCOVERY_STARTING:
2050                 break;
2051         case DISCOVERY_FINDING:
2052                 mgmt_discovering(hdev, 1);
2053                 break;
2054         case DISCOVERY_RESOLVING:
2055                 break;
2056         case DISCOVERY_STOPPING:
2057                 break;
2058         }
2059 }
2060 
2061 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2062 {
2063         struct discovery_state *cache = &hdev->discovery;
2064         struct inquiry_entry *p, *n;
2065 
2066         list_for_each_entry_safe(p, n, &cache->all, all) {
2067                 list_del(&p->all);
2068                 kfree(p);
2069         }
2070 
2071         INIT_LIST_HEAD(&cache->unknown);
2072         INIT_LIST_HEAD(&cache->resolve);
2073 }
2074 
2075 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2076                                                bdaddr_t *bdaddr)
2077 {
2078         struct discovery_state *cache = &hdev->discovery;
2079         struct inquiry_entry *e;
2080 
2081         BT_DBG("cache %p, %pMR", cache, bdaddr);
2082 
2083         list_for_each_entry(e, &cache->all, all) {
2084                 if (!bacmp(&e->data.bdaddr, bdaddr))
2085                         return e;
2086         }
2087 
2088         return NULL;
2089 }
2090 
2091 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2092                                                        bdaddr_t *bdaddr)
2093 {
2094         struct discovery_state *cache = &hdev->discovery;
2095         struct inquiry_entry *e;
2096 
2097         BT_DBG("cache %p, %pMR", cache, bdaddr);
2098 
2099         list_for_each_entry(e, &cache->unknown, list) {
2100                 if (!bacmp(&e->data.bdaddr, bdaddr))
2101                         return e;
2102         }
2103 
2104         return NULL;
2105 }
2106 
2107 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2108                                                        bdaddr_t *bdaddr,
2109                                                        int state)
2110 {
2111         struct discovery_state *cache = &hdev->discovery;
2112         struct inquiry_entry *e;
2113 
2114         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2115 
2116         list_for_each_entry(e, &cache->resolve, list) {
2117                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2118                         return e;
2119                 if (!bacmp(&e->data.bdaddr, bdaddr))
2120                         return e;
2121         }
2122 
2123         return NULL;
2124 }
2125 
2126 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2127                                       struct inquiry_entry *ie)
2128 {
2129         struct discovery_state *cache = &hdev->discovery;
2130         struct list_head *pos = &cache->resolve;
2131         struct inquiry_entry *p;
2132 
2133         list_del(&ie->list);
2134 
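             /* The resolve list is kept sorted by ascending |RSSI|; the
              * loop stops at the first entry that is not name-pending and
              * has a weaker (larger absolute) RSSI, so the strongest
              * devices get their names resolved first.
              */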
2135         list_for_each_entry(p, &cache->resolve, list) {
2136                 if (p->name_state != NAME_PENDING &&
2137                     abs(p->data.rssi) >= abs(ie->data.rssi))
2138                         break;
2139                 pos = &p->list;
2140         }
2141 
2142         list_add(&ie->list, pos);
2143 }
2144 
2145 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2146                              bool name_known)
2147 {
2148         struct discovery_state *cache = &hdev->discovery;
2149         struct inquiry_entry *ie;
2150         u32 flags = 0;
2151 
2152         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2153 
2154         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2155 
2156         if (!data->ssp_mode)
2157                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2158 
2159         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2160         if (ie) {
2161                 if (!ie->data.ssp_mode)
2162                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2163 
2164                 if (ie->name_state == NAME_NEEDED &&
2165                     data->rssi != ie->data.rssi) {
2166                         ie->data.rssi = data->rssi;
2167                         hci_inquiry_cache_update_resolve(hdev, ie);
2168                 }
2169 
2170                 goto update;
2171         }
2172 
2173         /* Entry not in the cache. Add new one. */
2174         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2175         if (!ie) {
2176                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2177                 goto done;
2178         }
2179 
2180         list_add(&ie->all, &cache->all);
2181 
2182         if (name_known) {
2183                 ie->name_state = NAME_KNOWN;
2184         } else {
2185                 ie->name_state = NAME_NOT_KNOWN;
2186                 list_add(&ie->list, &cache->unknown);
2187         }
2188 
2189 update:
2190         if (name_known && ie->name_state != NAME_KNOWN &&
2191             ie->name_state != NAME_PENDING) {
2192                 ie->name_state = NAME_KNOWN;
2193                 list_del(&ie->list);
2194         }
2195 
2196         memcpy(&ie->data, data, sizeof(*data));
2197         ie->timestamp = jiffies;
2198         cache->timestamp = jiffies;
2199 
2200         if (ie->name_state == NAME_NOT_KNOWN)
2201                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2202 
2203 done:
2204         return flags;
2205 }
2206 
2207 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2208 {
2209         struct discovery_state *cache = &hdev->discovery;
2210         struct inquiry_info *info = (struct inquiry_info *) buf;
2211         struct inquiry_entry *e;
2212         int copied = 0;
2213 
2214         list_for_each_entry(e, &cache->all, all) {
2215                 struct inquiry_data *data = &e->data;
2216 
2217                 if (copied >= num)
2218                         break;
2219 
2220                 bacpy(&info->bdaddr, &data->bdaddr);
2221                 info->pscan_rep_mode    = data->pscan_rep_mode;
2222                 info->pscan_period_mode = data->pscan_period_mode;
2223                 info->pscan_mode        = data->pscan_mode;
2224                 memcpy(info->dev_class, data->dev_class, 3);
2225                 info->clock_offset      = data->clock_offset;
2226 
2227                 info++;
2228                 copied++;
2229         }
2230 
2231         BT_DBG("cache %p, copied %d", cache, copied);
2232         return copied;
2233 }
2234 
2235 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2236 {
2237         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2238         struct hci_dev *hdev = req->hdev;
2239         struct hci_cp_inquiry cp;
2240 
2241         BT_DBG("%s", hdev->name);
2242 
2243         if (test_bit(HCI_INQUIRY, &hdev->flags))
2244                 return;
2245 
2246         /* Start Inquiry */
2247         memcpy(&cp.lap, &ir->lap, 3);
2248         cp.length  = ir->length;
2249         cp.num_rsp = ir->num_rsp;
2250         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2251 }
2252 
2253 int hci_inquiry(void __user *arg)
2254 {
2255         __u8 __user *ptr = arg;
2256         struct hci_inquiry_req ir;
2257         struct hci_dev *hdev;
2258         int err = 0, do_inquiry = 0, max_rsp;
2259         long timeo;
2260         __u8 *buf;
2261 
2262         if (copy_from_user(&ir, ptr, sizeof(ir)))
2263                 return -EFAULT;
2264 
2265         hdev = hci_dev_get(ir.dev_id);
2266         if (!hdev)
2267                 return -ENODEV;
2268 
2269         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2270                 err = -EBUSY;
2271                 goto done;
2272         }
2273 
2274         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2275                 err = -EOPNOTSUPP;
2276                 goto done;
2277         }
2278 
2279         if (hdev->dev_type != HCI_BREDR) {
2280                 err = -EOPNOTSUPP;
2281                 goto done;
2282         }
2283 
2284         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2285                 err = -EOPNOTSUPP;
2286                 goto done;
2287         }
2288 
2289         hci_dev_lock(hdev);
2290         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2291             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2292                 hci_inquiry_cache_flush(hdev);
2293                 do_inquiry = 1;
2294         }
2295         hci_dev_unlock(hdev);
2296 
2297         timeo = ir.length * msecs_to_jiffies(2000);
2298 
2299         if (do_inquiry) {
2300                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2301                                    timeo);
2302                 if (err < 0)
2303                         goto done;
2304 
2305                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2306                  * cleared). If it is interrupted by a signal, fail with
2307                  * -EINTR via the done label so that the device reference
2308                  * taken above is dropped.
2309                  */
2310                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                     TASK_INTERRUPTIBLE)) {
                             err = -EINTR;
                             goto done;
                     }
2311         }
2312 
2313         /* For an unlimited number of responses, use a buffer with
2314          * 255 entries.
2315          */
2316         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2317 
2318         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2319          * buffer here and copy it to user space afterwards.
2320          */
2321         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2322         if (!buf) {
2323                 err = -ENOMEM;
2324                 goto done;
2325         }
2326 
2327         hci_dev_lock(hdev);
2328         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2329         hci_dev_unlock(hdev);
2330 
2331         BT_DBG("num_rsp %d", ir.num_rsp);
2332 
2333         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2334                 ptr += sizeof(ir);
2335                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2336                                  ir.num_rsp))
2337                         err = -EFAULT;
2338         } else
2339                 err = -EFAULT;
2340 
2341         kfree(buf);
2342 
2343 done:
2344         hci_dev_put(hdev);
2345         return err;
2346 }
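     /* User-space view (sketch, assuming the BlueZ <bluetooth/hci.h>
      * definitions of HCIINQUIRY, struct hci_inquiry_req and struct
      * inquiry_info, with process() standing in for the caller's own
      * consumer): the request header and the response buffer travel in
      * one allocation, the lap below is the little-endian GIAC 0x9e8b33,
      * and ir.num_rsp is updated with the number of responses on return.
      *
      *	struct {
      *		struct hci_inquiry_req ir;
      *		struct inquiry_info info[255];
      *	} req = {
      *		.ir.dev_id  = 0,
      *		.ir.length  = 8,
      *		.ir.num_rsp = 255,
      *		.ir.lap     = { 0x33, 0x8b, 0x9e },
      *	};
      *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
      *
      *	if (sk >= 0 && ioctl(sk, HCIINQUIRY, &req) == 0)
      *		process(req.info, req.ir.num_rsp);
      */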
2347 
2348 static int hci_dev_do_open(struct hci_dev *hdev)
2349 {
2350         int ret = 0;
2351 
2352         BT_DBG("%s %p", hdev->name, hdev);
2353 
2354         hci_req_lock(hdev);
2355 
2356         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2357                 ret = -ENODEV;
2358                 goto done;
2359         }
2360 
2361         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2362             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2363                 /* Check for rfkill but allow the HCI setup stage to
2364                  * proceed (which in itself doesn't cause any RF activity).
2365                  */
2366                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2367                         ret = -ERFKILL;
2368                         goto done;
2369                 }
2370 
2371                 /* Check for valid public address or a configured static
2372                  * random address, but let the HCI setup proceed to
2373                  * be able to determine if there is a public address
2374                  * or not.
2375                  *
2376                  * In case of user channel usage, it is not important
2377                  * if a public address or static random address is
2378                  * available.
2379                  *
2380                  * This check is only valid for BR/EDR controllers
2381                  * since AMP controllers do not have an address.
2382                  */
2383                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2384                     hdev->dev_type == HCI_BREDR &&
2385                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2386                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2387                         ret = -EADDRNOTAVAIL;
2388                         goto done;
2389                 }
2390         }
2391 
2392         if (test_bit(HCI_UP, &hdev->flags)) {
2393                 ret = -EALREADY;
2394                 goto done;
2395         }
2396 
2397         if (hdev->open(hdev)) {
2398                 ret = -EIO;
2399                 goto done;
2400         }
2401 
2402         atomic_set(&hdev->cmd_cnt, 1);
2403         set_bit(HCI_INIT, &hdev->flags);
2404 
2405         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2406                 if (hdev->setup)
2407                         ret = hdev->setup(hdev);
2408 
2409                 /* The transport driver can set these quirks before
2410                  * creating the HCI device or in its setup callback.
2411                  *
2412                  * In case any of them is set, the controller has to
2413                  * start up as unconfigured.
2414                  */
2415                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2416                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2417                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2418 
2419                 /* For an unconfigured controller it is required to
2420                  * read at least the version information provided by
2421                  * the Read Local Version Information command.
2422                  *
2423                  * If the set_bdaddr driver callback is provided, then
2424                  * also the original Bluetooth public device address
2425                  * will be read using the Read BD Address command.
2426                  */
2427                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2428                         ret = __hci_unconf_init(hdev);
2429         }
2430 
2431         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2432                 /* If public address change is configured, ensure that
2433                  * the address gets programmed. If the driver does not
2434                  * support changing the public address, fail the power
2435                  * on procedure.
2436                  */
2437                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2438                     hdev->set_bdaddr)
2439                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2440                 else
2441                         ret = -EADDRNOTAVAIL;
2442         }
2443 
2444         if (!ret) {
2445                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2446                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2447                         ret = __hci_init(hdev);
2448         }
2449 
2450         clear_bit(HCI_INIT, &hdev->flags);
2451 
2452         if (!ret) {
2453                 hci_dev_hold(hdev);
2454                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2455                 set_bit(HCI_UP, &hdev->flags);
2456                 hci_notify(hdev, HCI_DEV_UP);
2457                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2458                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2459                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2460                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2461                     hdev->dev_type == HCI_BREDR) {
2462                         hci_dev_lock(hdev);
2463                         mgmt_powered(hdev, 1);
2464                         hci_dev_unlock(hdev);
2465                 }
2466         } else {
2467                 /* Init failed, cleanup */
2468                 flush_work(&hdev->tx_work);
2469                 flush_work(&hdev->cmd_work);
2470                 flush_work(&hdev->rx_work);
2471 
2472                 skb_queue_purge(&hdev->cmd_q);
2473                 skb_queue_purge(&hdev->rx_q);
2474 
2475                 if (hdev->flush)
2476                         hdev->flush(hdev);
2477 
2478                 if (hdev->sent_cmd) {
2479                         kfree_skb(hdev->sent_cmd);
2480                         hdev->sent_cmd = NULL;
2481                 }
2482 
2483                 hdev->close(hdev);
2484                 hdev->flags &= BIT(HCI_RAW);
2485         }
2486 
2487 done:
2488         hci_req_unlock(hdev);
2489         return ret;
2490 }
2491 
2492 /* ---- HCI ioctl helpers ---- */
2493 
2494 int hci_dev_open(__u16 dev)
2495 {
2496         struct hci_dev *hdev;
2497         int err;
2498 
2499         hdev = hci_dev_get(dev);
2500         if (!hdev)
2501                 return -ENODEV;
2502 
2503         /* Devices that are marked as unconfigured can only be powered
2504          * up as user channel. Trying to bring them up as normal devices
2505          * will result in a failure. Only user channel operation is
2506          * possible.
2507          *
2508          * When this function is called for a user channel, the flag
2509          * HCI_USER_CHANNEL will be set first before attempting to
2510          * open the device.
2511          */
2512         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2513             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2514                 err = -EOPNOTSUPP;
2515                 goto done;
2516         }
2517 
2518         /* We need to ensure that no other power on/off work is pending
2519          * before proceeding to call hci_dev_do_open. This is
2520          * particularly important if the setup procedure has not yet
2521          * completed.
2522          */
2523         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2524                 cancel_delayed_work(&hdev->power_off);
2525 
2526         /* After this call it is guaranteed that the setup procedure
2527          * has finished. This means that error conditions like RFKILL
2528          * or the lack of a valid public or static random address apply.
2529          */
2530         flush_workqueue(hdev->req_workqueue);
2531 
2532         /* For controllers not using the management interface and that
2533          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2534          * so that pairing works for them. Once the management interface
2535          * is in use this bit will be cleared again and userspace has
2536          * to explicitly enable it.
2537          */
2538         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2539             !test_bit(HCI_MGMT, &hdev->dev_flags))
2540                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2541 
2542         err = hci_dev_do_open(hdev);
2543 
2544 done:
2545         hci_dev_put(hdev);
2546         return err;
2547 }
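     /* Legacy ioctl path (sketch, assuming the BlueZ <bluetooth/hci.h>
      * HCIDEVUP definition): hci_sock routes the ioctl here after a
      * CAP_NET_ADMIN check.
      *
      *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
      *
      *	if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0)
      *		perror("hci0 up");
      */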
2548 
2549 /* This function requires the caller holds hdev->lock */
2550 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2551 {
2552         struct hci_conn_params *p;
2553 
2554         list_for_each_entry(p, &hdev->le_conn_params, list) {
2555                 if (p->conn) {
2556                         hci_conn_drop(p->conn);
2557                         hci_conn_put(p->conn);
2558                         p->conn = NULL;
2559                 }
2560                 list_del_init(&p->action);
2561         }
2562 
2563         BT_DBG("All LE pending actions cleared");
2564 }
2565 
2566 static int hci_dev_do_close(struct hci_dev *hdev)
2567 {
2568         BT_DBG("%s %p", hdev->name, hdev);
2569 
2570         cancel_delayed_work(&hdev->power_off);
2571 
2572         hci_req_cancel(hdev, ENODEV);
2573         hci_req_lock(hdev);
2574 
2575         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2576                 cancel_delayed_work_sync(&hdev->cmd_timer);
2577                 hci_req_unlock(hdev);
2578                 return 0;
2579         }
2580 
2581         /* Flush RX and TX works */
2582         flush_work(&hdev->tx_work);
2583         flush_work(&hdev->rx_work);
2584 
2585         if (hdev->discov_timeout > 0) {
2586                 cancel_delayed_work(&hdev->discov_off);
2587                 hdev->discov_timeout = 0;
2588                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2589                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2590         }
2591 
2592         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2593                 cancel_delayed_work(&hdev->service_cache);
2594 
2595         cancel_delayed_work_sync(&hdev->le_scan_disable);
2596 
2597         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2598                 cancel_delayed_work_sync(&hdev->rpa_expired);
2599 
2600         hci_dev_lock(hdev);
2601         hci_inquiry_cache_flush(hdev);
2602         hci_pend_le_actions_clear(hdev);
2603         hci_conn_hash_flush(hdev);
2604         hci_dev_unlock(hdev);
2605 
2606         hci_notify(hdev, HCI_DEV_DOWN);
2607 
2608         if (hdev->flush)
2609                 hdev->flush(hdev);
2610 
2611         /* Reset device */
2612         skb_queue_purge(&hdev->cmd_q);
2613         atomic_set(&hdev->cmd_cnt, 1);
2614         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2615             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2616             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2617                 set_bit(HCI_INIT, &hdev->flags);
2618                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2619                 clear_bit(HCI_INIT, &hdev->flags);
2620         }
2621 
2622         /* Flush cmd work */
2623         flush_work(&hdev->cmd_work);
2624 
2625         /* Drop queues */
2626         skb_queue_purge(&hdev->rx_q);
2627         skb_queue_purge(&hdev->cmd_q);
2628         skb_queue_purge(&hdev->raw_q);
2629 
2630         /* Drop last sent command */
2631         if (hdev->sent_cmd) {
2632                 cancel_delayed_work_sync(&hdev->cmd_timer);
2633                 kfree_skb(hdev->sent_cmd);
2634                 hdev->sent_cmd = NULL;
2635         }
2636 
2637         kfree_skb(hdev->recv_evt);
2638         hdev->recv_evt = NULL;
2639 
2640         /* After this point our queues are empty
2641          * and no tasks are scheduled. */
2642         hdev->close(hdev);
2643 
2644         /* Clear flags */
2645         hdev->flags &= BIT(HCI_RAW);
2646         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2647 
2648         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2649                 if (hdev->dev_type == HCI_BREDR) {
2650                         hci_dev_lock(hdev);
2651                         mgmt_powered(hdev, 0);
2652                         hci_dev_unlock(hdev);
2653                 }
2654         }
2655 
2656         /* Controller radio is available but is currently powered down */
2657         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2658 
2659         memset(hdev->eir, 0, sizeof(hdev->eir));
2660         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2661         bacpy(&hdev->random_addr, BDADDR_ANY);
2662 
2663         hci_req_unlock(hdev);
2664 
2665         hci_dev_put(hdev);
2666         return 0;
2667 }
2668 
2669 int hci_dev_close(__u16 dev)
2670 {
2671         struct hci_dev *hdev;
2672         int err;
2673 
2674         hdev = hci_dev_get(dev);
2675         if (!hdev)
2676                 return -ENODEV;
2677 
2678         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2679                 err = -EBUSY;
2680                 goto done;
2681         }
2682 
2683         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2684                 cancel_delayed_work(&hdev->power_off);
2685 
2686         err = hci_dev_do_close(hdev);
2687 
2688 done:
2689         hci_dev_put(hdev);
2690         return err;
2691 }
2692 
2693 int hci_dev_reset(__u16 dev)
2694 {
2695         struct hci_dev *hdev;
2696         int ret = 0;
2697 
2698         hdev = hci_dev_get(dev);
2699         if (!hdev)
2700                 return -ENODEV;
2701 
2702         hci_req_lock(hdev);
2703 
2704         if (!test_bit(HCI_UP, &hdev->flags)) {
2705                 ret = -ENETDOWN;
2706                 goto done;
2707         }
2708 
2709         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2710                 ret = -EBUSY;
2711                 goto done;
2712         }
2713 
2714         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2715                 ret = -EOPNOTSUPP;
2716                 goto done;
2717         }
2718 
2719         /* Drop queues */
2720         skb_queue_purge(&hdev->rx_q);
2721         skb_queue_purge(&hdev->cmd_q);
2722 
2723         hci_dev_lock(hdev);
2724         hci_inquiry_cache_flush(hdev);
2725         hci_conn_hash_flush(hdev);
2726         hci_dev_unlock(hdev);
2727 
2728         if (hdev->flush)
2729                 hdev->flush(hdev);
2730 
2731         atomic_set(&hdev->cmd_cnt, 1);
2732         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2733 
2734         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2735 
2736 done:
2737         hci_req_unlock(hdev);
2738         hci_dev_put(hdev);
2739         return ret;
2740 }
2741 
2742 int hci_dev_reset_stat(__u16 dev)
2743 {
2744         struct hci_dev *hdev;
2745         int ret = 0;
2746 
2747         hdev = hci_dev_get(dev);
2748         if (!hdev)
2749                 return -ENODEV;
2750 
2751         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2752                 ret = -EBUSY;
2753                 goto done;
2754         }
2755 
2756         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2757                 ret = -EOPNOTSUPP;
2758                 goto done;
2759         }
2760 
2761         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2762 
2763 done:
2764         hci_dev_put(hdev);
2765         return ret;
2766 }
2767 
2768 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2769 {
2770         bool conn_changed, discov_changed;
2771 
2772         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2773 
2774         if ((scan & SCAN_PAGE))
2775                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2776                                                  &hdev->dev_flags);
2777         else
2778                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2779                                                   &hdev->dev_flags);
2780 
2781         if ((scan & SCAN_INQUIRY)) {
2782                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2783                                                    &hdev->dev_flags);
2784         } else {
2785                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2786                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2787                                                     &hdev->dev_flags);
2788         }
2789 
2790         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2791                 return;
2792 
2793         if (conn_changed || discov_changed) {
2794                 /* In case this was disabled through mgmt */
2795                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2796 
2797                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2798                         mgmt_update_adv_data(hdev);
2799 
2800                 mgmt_new_settings(hdev);
2801         }
2802 }
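     /* Worked example, assuming the standard SCAN_INQUIRY (0x01) and
      * SCAN_PAGE (0x02) values: HCISETSCAN with dev_opt = 0x03 sets both
      * HCI_CONNECTABLE and HCI_DISCOVERABLE above, while dev_opt = 0x00
      * clears both.
      */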
2803 
2804 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2805 {
2806         struct hci_dev *hdev;
2807         struct hci_dev_req dr;
2808         int err = 0;
2809 
2810         if (copy_from_user(&dr, arg, sizeof(dr)))
2811                 return -EFAULT;
2812 
2813         hdev = hci_dev_get(dr.dev_id);
2814         if (!hdev)
2815                 return -ENODEV;
2816 
2817         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2818                 err = -EBUSY;
2819                 goto done;
2820         }
2821 
2822         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2823                 err = -EOPNOTSUPP;
2824                 goto done;
2825         }
2826 
2827         if (hdev->dev_type != HCI_BREDR) {
2828                 err = -EOPNOTSUPP;
2829                 goto done;
2830         }
2831 
2832         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2833                 err = -EOPNOTSUPP;
2834                 goto done;
2835         }
2836 
2837         switch (cmd) {
2838         case HCISETAUTH:
2839                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2840                                    HCI_INIT_TIMEOUT);
2841                 break;
2842 
2843         case HCISETENCRYPT:
2844                 if (!lmp_encrypt_capable(hdev)) {
2845                         err = -EOPNOTSUPP;
2846                         break;
2847                 }
2848 
2849                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2850                         /* Auth must be enabled first */
2851                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2852                                            HCI_INIT_TIMEOUT);
2853                         if (err)
2854                                 break;
2855                 }
2856 
2857                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2858                                    HCI_INIT_TIMEOUT);
2859                 break;
2860 
2861         case HCISETSCAN:
2862                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2863                                    HCI_INIT_TIMEOUT);
2864 
2865                 /* Ensure that the connectable and discoverable states
2866                  * get correctly modified as this was a non-mgmt change.
2867                  */
2868                 if (!err)
2869                         hci_update_scan_state(hdev, dr.dev_opt);
2870                 break;
2871 
2872         case HCISETLINKPOL:
2873                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2874                                    HCI_INIT_TIMEOUT);
2875                 break;
2876 
2877         case HCISETLINKMODE:
2878                 hdev->link_mode = ((__u16) dr.dev_opt) &
2879                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2880                 break;
2881 
2882         case HCISETPTYPE:
2883                 hdev->pkt_type = (__u16) dr.dev_opt;
2884                 break;
2885 
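             /* The two MTU ioctls pack dev_opt as two consecutive __u16
              * values: the first carries the packet count, the second the
              * MTU.
              */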
2886         case HCISETACLMTU:
2887                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2888                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2889                 break;
2890 
2891         case HCISETSCOMTU:
2892                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2893                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2894                 break;
2895 
2896         default:
2897                 err = -EINVAL;
2898                 break;
2899         }
2900 
2901 done:
2902         hci_dev_put(hdev);
2903         return err;
2904 }
2905 
2906 int hci_get_dev_list(void __user *arg)
2907 {
2908         struct hci_dev *hdev;
2909         struct hci_dev_list_req *dl;
2910         struct hci_dev_req *dr;
2911         int n = 0, size, err;
2912         __u16 dev_num;
2913 
2914         if (get_user(dev_num, (__u16 __user *) arg))
2915                 return -EFAULT;
2916 
2917         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2918                 return -EINVAL;
2919 
2920         size = sizeof(*dl) + dev_num * sizeof(*dr);
2921 
2922         dl = kzalloc(size, GFP_KERNEL);
2923         if (!dl)
2924                 return -ENOMEM;
2925 
2926         dr = dl->dev_req;
2927 
2928         read_lock(&hci_dev_list_lock);
2929         list_for_each_entry(hdev, &hci_dev_list, list) {
2930                 unsigned long flags = hdev->flags;
2931 
2932                 /* When auto-off is configured the transport is still
2933                  * running, but the device must nevertheless be
2934                  * reported as down.
2935                  */
2936                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2937                         flags &= ~BIT(HCI_UP);
2938 
2939                 (dr + n)->dev_id  = hdev->id;
2940                 (dr + n)->dev_opt = flags;
2941 
2942                 if (++n >= dev_num)
2943                         break;
2944         }
2945         read_unlock(&hci_dev_list_lock);
2946 
2947         dl->dev_num = n;
2948         size = sizeof(*dl) + n * sizeof(*dr);
2949 
2950         err = copy_to_user(arg, dl, size);
2951         kfree(dl);
2952 
2953         return err ? -EFAULT : 0;
2954 }
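     /* User-space view (sketch, assuming the BlueZ HCIGETDEVLIST,
      * HCI_MAX_DEV and struct hci_dev_list_req definitions, with ctl
      * being a raw HCI socket as above and use() standing in for the
      * caller's own consumer); dev_num is written back with the number
      * of entries actually filled in:
      *
      *	struct hci_dev_list_req *dl;
      *
      *	dl = calloc(1, sizeof(*dl) +
      *		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
      *	if (dl) {
      *		dl->dev_num = HCI_MAX_DEV;
      *		if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
      *			use(dl->dev_req, dl->dev_num);
      *	}
      */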
2955 
2956 int hci_get_dev_info(void __user *arg)
2957 {
2958         struct hci_dev *hdev;
2959         struct hci_dev_info di;
2960         unsigned long flags;
2961         int err = 0;
2962 
2963         if (copy_from_user(&di, arg, sizeof(di)))
2964                 return -EFAULT;
2965 
2966         hdev = hci_dev_get(di.dev_id);
2967         if (!hdev)
2968                 return -ENODEV;
2969 
2970         /* When auto-off is configured the transport is still
2971          * running, but the device must nevertheless be
2972          * reported as down.
2973          */
2974         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2975                 flags = hdev->flags & ~BIT(HCI_UP);
2976         else
2977                 flags = hdev->flags;
2978 
2979         strcpy(di.name, hdev->name);
2980         di.bdaddr   = hdev->bdaddr;
2981         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2982         di.flags    = flags;
2983         di.pkt_type = hdev->pkt_type;
2984         if (lmp_bredr_capable(hdev)) {
2985                 di.acl_mtu  = hdev->acl_mtu;
2986                 di.acl_pkts = hdev->acl_pkts;
2987                 di.sco_mtu  = hdev->sco_mtu;
2988                 di.sco_pkts = hdev->sco_pkts;
2989         } else {
2990                 di.acl_mtu  = hdev->le_mtu;
2991                 di.acl_pkts = hdev->le_pkts;
2992                 di.sco_mtu  = 0;
2993                 di.sco_pkts = 0;
2994         }
2995         di.link_policy = hdev->link_policy;
2996         di.link_mode   = hdev->link_mode;
2997 
2998         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2999         memcpy(&di.features, &hdev->features, sizeof(di.features));
3000 
3001         if (copy_to_user(arg, &di, sizeof(di)))
3002                 err = -EFAULT;
3003 
3004         hci_dev_put(hdev);
3005 
3006         return err;
3007 }
3008 
3009 /* ---- Interface to HCI drivers ---- */
3010 
3011 static int hci_rfkill_set_block(void *data, bool blocked)
3012 {
3013         struct hci_dev *hdev = data;
3014 
3015         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3016 
3017         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3018                 return -EBUSY;
3019 
3020         if (blocked) {
3021                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3022                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3023                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3024                         hci_dev_do_close(hdev);
3025         } else {
3026                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3027         }
3028 
3029         return 0;
3030 }
3031 
3032 static const struct rfkill_ops hci_rfkill_ops = {
3033         .set_block = hci_rfkill_set_block,
3034 };
3035 
3036 static void hci_power_on(struct work_struct *work)
3037 {
3038         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3039         int err;
3040 
3041         BT_DBG("%s", hdev->name);
3042 
3043         err = hci_dev_do_open(hdev);
3044         if (err < 0) {
3045                 mgmt_set_powered_failed(hdev, err);
3046                 return;
3047         }
3048 
3049         /* During the HCI setup phase, a few error conditions are
3050          * ignored and they need to be checked now. If they are still
3051          * valid, it is important to turn the device back off.
3052          */
3053         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3054             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3055             (hdev->dev_type == HCI_BREDR &&
3056              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3057              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3058                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3059                 hci_dev_do_close(hdev);
3060         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3061                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3062                                    HCI_AUTO_OFF_TIMEOUT);
3063         }
3064 
3065         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3066                 /* For unconfigured devices, set the HCI_RAW flag
3067                  * so that userspace can easily identify them.
3068                  */
3069                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3070                         set_bit(HCI_RAW, &hdev->flags);
3071 
3072                 /* For fully configured devices, this will send
3073                  * the Index Added event. For unconfigured devices,
3074                  * it will send an Unconfigured Index Added event.
3075                  *
3076                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3077                  * and no event will be sent.
3078                  */
3079                 mgmt_index_added(hdev);
3080         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3081                 /* Now that the controller is configured, it is
3082                  * important to clear the HCI_RAW flag.
3083                  */
3084                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3085                         clear_bit(HCI_RAW, &hdev->flags);
3086 
3087                 /* Powering on the controller with HCI_CONFIG set only
3088                  * happens with the transition from unconfigured to
3089                  * configured. This will send the Index Added event.
3090                  */
3091                 mgmt_index_added(hdev);
3092         }
3093 }
3094 
3095 static void hci_power_off(struct work_struct *work)
3096 {
3097         struct hci_dev *hdev = container_of(work, struct hci_dev,
3098                                             power_off.work);
3099 
3100         BT_DBG("%s", hdev->name);
3101 
3102         hci_dev_do_close(hdev);
3103 }
3104 
3105 static void hci_discov_off(struct work_struct *work)
3106 {
3107         struct hci_dev *hdev;
3108 
3109         hdev = container_of(work, struct hci_dev, discov_off.work);
3110 
3111         BT_DBG("%s", hdev->name);
3112 
3113         mgmt_discoverable_timeout(hdev);
3114 }
3115 
3116 void hci_uuids_clear(struct hci_dev *hdev)
3117 {
3118         struct bt_uuid *uuid, *tmp;
3119 
3120         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3121                 list_del(&uuid->list);
3122                 kfree(uuid);
3123         }
3124 }
3125 
3126 void hci_link_keys_clear(struct hci_dev *hdev)
3127 {
3128         struct list_head *p, *n;
3129 
3130         list_for_each_safe(p, n, &hdev->link_keys) {
3131                 struct link_key *key;
3132 
3133                 key = list_entry(p, struct link_key, list);
3134 
3135                 list_del(p);
3136                 kfree(key);
3137         }
3138 }
3139 
3140 void hci_smp_ltks_clear(struct hci_dev *hdev)
3141 {
3142         struct smp_ltk *k, *tmp;
3143 
3144         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3145                 list_del(&k->list);
3146                 kfree(k);
3147         }
3148 }
3149 
3150 void hci_smp_irks_clear(struct hci_dev *hdev)
3151 {
3152         struct smp_irk *k, *tmp;
3153 
3154         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3155                 list_del(&k->list);
3156                 kfree(k);
3157         }
3158 }
3159 
3160 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3161 {
3162         struct link_key *k;
3163 
3164         list_for_each_entry(k, &hdev->link_keys, list)
3165                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3166                         return k;
3167 
3168         return NULL;
3169 }
3170 
3171 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3172                                u8 key_type, u8 old_key_type)
3173 {
3174         /* Legacy key */
3175         if (key_type < 0x03)
3176                 return true;
3177 
3178         /* Debug keys are insecure so don't store them persistently */
3179         if (key_type == HCI_LK_DEBUG_COMBINATION)
3180                 return false;
3181 
3182         /* Changed combination key and there's no previous one */
3183         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3184                 return false;
3185 
3186         /* Security mode 3 case */
3187         if (!conn)
3188                 return true;
3189 
3190         /* Neither the local nor the remote side required no-bonding */
3191         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3192                 return true;
3193 
3194         /* Local side had dedicated bonding as requirement */
3195         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3196                 return true;
3197 
3198         /* Remote side had dedicated bonding as requirement */
3199         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3200                 return true;
3201 
3202         /* If none of the above criteria match, then don't store the key
3203          * persistently */
3204         return false;
3205 }
3206 
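/* Illustrative sketch (not part of the original file): how the rules in
 * hci_persistent_key() classify a few common cases. Since the helper is
 * static, a self-check like this would have to live in this file.
 */
static void __maybe_unused example_key_persistence(struct hci_dev *hdev)
{
        /* Legacy key types (< 0x03) are always stored. */
        WARN_ON(!hci_persistent_key(hdev, NULL, 0x01, 0xff));

        /* Debug combination keys are never stored persistently. */
        WARN_ON(hci_persistent_key(hdev, NULL, HCI_LK_DEBUG_COMBINATION,
                                   0xff));

        /* A changed combination key with no previous key is not stored. */
        WARN_ON(hci_persistent_key(hdev, NULL, HCI_LK_CHANGED_COMBINATION,
                                   0xff));
}
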
3207 static u8 ltk_role(u8 type)
3208 {
3209         if (type == SMP_LTK)
3210                 return HCI_ROLE_MASTER;
3211 
3212         return HCI_ROLE_SLAVE;
3213 }
3214 
3215 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3216                              u8 role)
3217 {
3218         struct smp_ltk *k;
3219 
3220         list_for_each_entry(k, &hdev->long_term_keys, list) {
3221                 if (k->ediv != ediv || k->rand != rand)
3222                         continue;
3223 
3224                 if (ltk_role(k->type) != role)
3225                         continue;
3226 
3227                 return k;
3228         }
3229 
3230         return NULL;
3231 }
3232 
3233 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3234                                      u8 addr_type, u8 role)
3235 {
3236         struct smp_ltk *k;
3237 
3238         list_for_each_entry(k, &hdev->long_term_keys, list)
3239                 if (addr_type == k->bdaddr_type &&
3240                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3241                     ltk_role(k->type) == role)
3242                         return k;
3243 
3244         return NULL;
3245 }
3246 
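/* Resolving a Resolvable Private Address is done in two passes below:
 * first a cheap comparison against the last RPA seen for each IRK, and
 * only if that fails the AES-based smp_irk_matches() computation. A
 * successful crypto match refreshes the cached RPA for the next lookup.
 */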
3247 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3248 {
3249         struct smp_irk *irk;
3250 
3251         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3252                 if (!bacmp(&irk->rpa, rpa))
3253                         return irk;
3254         }
3255 
3256         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3257                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3258                         bacpy(&irk->rpa, rpa);
3259                         return irk;
3260                 }
3261         }
3262 
3263         return NULL;
3264 }
3265 
3266 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3267                                      u8 addr_type)
3268 {
3269         struct smp_irk *irk;
3270 
3271         /* Identity Address must be public or static random */
3272         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3273                 return NULL;
3274 
3275         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3276                 if (addr_type == irk->addr_type &&
3277                     bacmp(bdaddr, &irk->bdaddr) == 0)
3278                         return irk;
3279         }
3280 
3281         return NULL;
3282 }
3283 
3284 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3285                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3286                                   u8 pin_len, bool *persistent)
3287 {
3288         struct link_key *key, *old_key;
3289         u8 old_key_type;
3290 
3291         old_key = hci_find_link_key(hdev, bdaddr);
3292         if (old_key) {
3293                 old_key_type = old_key->type;
3294                 key = old_key;
3295         } else {
3296                 old_key_type = conn ? conn->key_type : 0xff;
3297                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3298                 if (!key)
3299                         return NULL;
3300                 list_add(&key->list, &hdev->link_keys);
3301         }
3302 
3303         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3304 
3305         /* Some buggy controller combinations generate a changed
3306          * combination key for legacy pairing even when there's no
3307          * previous key */
3308         if (type == HCI_LK_CHANGED_COMBINATION &&
3309             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3310                 type = HCI_LK_COMBINATION;
3311                 if (conn)
3312                         conn->key_type = type;
3313         }
3314 
3315         bacpy(&key->bdaddr, bdaddr);
3316         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3317         key->pin_len = pin_len;
3318 
3319         if (type == HCI_LK_CHANGED_COMBINATION)
3320                 key->type = old_key_type;
3321         else
3322                 key->type = type;
3323 
3324         if (persistent)
3325                 *persistent = hci_persistent_key(hdev, conn, type,
3326                                                  old_key_type);
3327 
3328         return key;
3329 }
3330 
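/* Usage sketch (illustrative, not part of the original file): storing a
 * link key delivered by the controller and checking, via the persistent
 * out-parameter, whether user space should keep it across reboots.
 */
static void __maybe_unused example_store_link_key(struct hci_dev *hdev,
                                                  struct hci_conn *conn,
                                                  bdaddr_t *bdaddr,
                                                  u8 val[HCI_LINK_KEY_SIZE])
{
        struct link_key *key;
        bool persistent;

        key = hci_add_link_key(hdev, conn, bdaddr, val, HCI_LK_COMBINATION,
                               0, &persistent);
        if (!key)
                return; /* out of memory */

        if (persistent)
                BT_DBG("%s key for %pMR should be stored", hdev->name,
                       bdaddr);
}
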
3331 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3332                             u8 addr_type, u8 type, u8 authenticated,
3333                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3334 {
3335         struct smp_ltk *key, *old_key;
3336         u8 role = ltk_role(type);
3337 
3338         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3339         if (old_key)
3340                 key = old_key;
3341         else {
3342                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3343                 if (!key)
3344                         return NULL;
3345                 list_add(&key->list, &hdev->long_term_keys);
3346         }
3347 
3348         bacpy(&key->bdaddr, bdaddr);
3349         key->bdaddr_type = addr_type;
3350         memcpy(key->val, tk, sizeof(key->val));
3351         key->authenticated = authenticated;
3352         key->ediv = ediv;
3353         key->rand = rand;
3354         key->enc_size = enc_size;
3355         key->type = type;
3356 
3357         return key;
3358 }
3359 
3360 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3361                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3362 {
3363         struct smp_irk *irk;
3364 
3365         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3366         if (!irk) {
3367                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3368                 if (!irk)
3369                         return NULL;
3370 
3371                 bacpy(&irk->bdaddr, bdaddr);
3372                 irk->addr_type = addr_type;
3373 
3374                 list_add(&irk->list, &hdev->identity_resolving_keys);
3375         }
3376 
3377         memcpy(irk->val, val, 16);
3378         bacpy(&irk->rpa, rpa);
3379 
3380         return irk;
3381 }
3382 
3383 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3384 {
3385         struct link_key *key;
3386 
3387         key = hci_find_link_key(hdev, bdaddr);
3388         if (!key)
3389                 return -ENOENT;
3390 
3391         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3392 
3393         list_del(&key->list);
3394         kfree(key);
3395 
3396         return 0;
3397 }
3398 
3399 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3400 {
3401         struct smp_ltk *k, *tmp;
3402         int removed = 0;
3403 
3404         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3405                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3406                         continue;
3407 
3408                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3409 
3410                 list_del(&k->list);
3411                 kfree(k);
3412                 removed++;
3413         }
3414 
3415         return removed ? 0 : -ENOENT;
3416 }
3417 
3418 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3419 {
3420         struct smp_irk *k, *tmp;
3421 
3422         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3423                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3424                         continue;
3425 
3426                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3427 
3428                 list_del(&k->list);
3429                 kfree(k);
3430         }
3431 }
3432 
3433 /* HCI command timer function */
3434 static void hci_cmd_timeout(struct work_struct *work)
3435 {
3436         struct hci_dev *hdev = container_of(work, struct hci_dev,
3437                                             cmd_timer.work);
3438 
3439         if (hdev->sent_cmd) {
3440                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3441                 u16 opcode = __le16_to_cpu(sent->opcode);
3442 
3443                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3444         } else {
3445                 BT_ERR("%s command tx timeout", hdev->name);
3446         }
3447 
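        /* Fake one free command credit and re-kick the command work so
         * the queue can make progress despite the lost completion.
         */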
3448         atomic_set(&hdev->cmd_cnt, 1);
3449         queue_work(hdev->workqueue, &hdev->cmd_work);
3450 }
3451 
3452 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3453                                           bdaddr_t *bdaddr)
3454 {
3455         struct oob_data *data;
3456 
3457         list_for_each_entry(data, &hdev->remote_oob_data, list)
3458                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3459                         return data;
3460 
3461         return NULL;
3462 }
3463 
3464 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3465 {
3466         struct oob_data *data;
3467 
3468         data = hci_find_remote_oob_data(hdev, bdaddr);
3469         if (!data)
3470                 return -ENOENT;
3471 
3472         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3473 
3474         list_del(&data->list);
3475         kfree(data);
3476 
3477         return 0;
3478 }
3479 
3480 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3481 {
3482         struct oob_data *data, *n;
3483 
3484         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3485                 list_del(&data->list);
3486                 kfree(data);
3487         }
3488 }
3489 
3490 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3491                             u8 *hash, u8 *randomizer)
3492 {
3493         struct oob_data *data;
3494 
3495         data = hci_find_remote_oob_data(hdev, bdaddr);
3496         if (!data) {
3497                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3498                 if (!data)
3499                         return -ENOMEM;
3500 
3501                 bacpy(&data->bdaddr, bdaddr);
3502                 list_add(&data->list, &hdev->remote_oob_data);
3503         }
3504 
3505         memcpy(data->hash192, hash, sizeof(data->hash192));
3506         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3507 
3508         memset(data->hash256, 0, sizeof(data->hash256));
3509         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3510 
3511         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3512 
3513         return 0;
3514 }
3515 
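/* Extended variant for Secure Connections capable controllers: stores
 * both the P-192 and P-256 hash/randomizer pairs, where the plain
 * variant above zeroes the P-256 fields.
 */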
3516 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3517                                 u8 *hash192, u8 *randomizer192,
3518                                 u8 *hash256, u8 *randomizer256)
3519 {
3520         struct oob_data *data;
3521 
3522         data = hci_find_remote_oob_data(hdev, bdaddr);
3523         if (!data) {
3524                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3525                 if (!data)
3526                         return -ENOMEM;
3527 
3528                 bacpy(&data->bdaddr, bdaddr);
3529                 list_add(&data->list, &hdev->remote_oob_data);
3530         }
3531 
3532         memcpy(data->hash192, hash192, sizeof(data->hash192));
3533         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3534 
3535         memcpy(data->hash256, hash256, sizeof(data->hash256));
3536         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3537 
3538         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3539 
3540         return 0;
3541 }
3542 
3543 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3544                                          bdaddr_t *bdaddr, u8 type)
3545 {
3546         struct bdaddr_list *b;
3547 
3548         list_for_each_entry(b, bdaddr_list, list) {
3549                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3550                         return b;
3551         }
3552 
3553         return NULL;
3554 }
3555 
3556 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3557 {
3558         struct list_head *p, *n;
3559 
3560         list_for_each_safe(p, n, bdaddr_list) {
3561                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3562 
3563                 list_del(p);
3564                 kfree(b);
3565         }
3566 }
3567 
3568 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3569 {
3570         struct bdaddr_list *entry;
3571 
3572         if (!bacmp(bdaddr, BDADDR_ANY))
3573                 return -EBADF;
3574 
3575         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3576                 return -EEXIST;
3577 
3578         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3579         if (!entry)
3580                 return -ENOMEM;
3581 
3582         bacpy(&entry->bdaddr, bdaddr);
3583         entry->bdaddr_type = type;
3584 
3585         list_add(&entry->list, list);
3586 
3587         return 0;
3588 }
3589 
3590 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3591 {
3592         struct bdaddr_list *entry;
3593 
3594         if (!bacmp(bdaddr, BDADDR_ANY)) {
3595                 hci_bdaddr_list_clear(list);
3596                 return 0;
3597         }
3598 
3599         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3600         if (!entry)
3601                 return -ENOENT;
3602 
3603         list_del(&entry->list);
3604         kfree(entry);
3605 
3606         return 0;
3607 }
3608 
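/* Illustrative round trip (not part of the original file): add an
 * address to the device white list, look it up and remove it again.
 * BDADDR_BREDR marks a plain BR/EDR address.
 */
static void __maybe_unused example_bdaddr_list(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        if (hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR) < 0)
                return; /* -EBADF, -EEXIST or -ENOMEM */

        WARN_ON(!hci_bdaddr_list_lookup(&hdev->whitelist, bdaddr,
                                        BDADDR_BREDR));

        hci_bdaddr_list_del(&hdev->whitelist, bdaddr, BDADDR_BREDR);
}
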
3609 /* This function requires the caller holds hdev->lock */
3610 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3611                                                bdaddr_t *addr, u8 addr_type)
3612 {
3613         struct hci_conn_params *params;
3614 
3615         /* The conn params list only contains identity addresses */
3616         if (!hci_is_identity_address(addr, addr_type))
3617                 return NULL;
3618 
3619         list_for_each_entry(params, &hdev->le_conn_params, list) {
3620                 if (bacmp(&params->addr, addr) == 0 &&
3621                     params->addr_type == addr_type) {
3622                         return params;
3623                 }
3624         }
3625 
3626         return NULL;
3627 }
3628 
3629 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3630 {
3631         struct hci_conn *conn;
3632 
3633         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3634         if (!conn)
3635                 return false;
3636 
3637         if (conn->dst_type != type)
3638                 return false;
3639 
3640         if (conn->state != BT_CONNECTED)
3641                 return false;
3642 
3643         return true;
3644 }
3645 
3646 /* This function requires the caller holds hdev->lock */
3647 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3648                                                   bdaddr_t *addr, u8 addr_type)
3649 {
3650         struct hci_conn_params *param;
3651 
3652         /* The list only contains identity addresses */
3653         if (!hci_is_identity_address(addr, addr_type))
3654                 return NULL;
3655 
3656         list_for_each_entry(param, list, action) {
3657                 if (bacmp(&param->addr, addr) == 0 &&
3658                     param->addr_type == addr_type)
3659                         return param;
3660         }
3661 
3662         return NULL;
3663 }
3664 
3665 /* This function requires the caller holds hdev->lock */
3666 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3667                                             bdaddr_t *addr, u8 addr_type)
3668 {
3669         struct hci_conn_params *params;
3670 
3671         if (!hci_is_identity_address(addr, addr_type))
3672                 return NULL;
3673 
3674         params = hci_conn_params_lookup(hdev, addr, addr_type);
3675         if (params)
3676                 return params;
3677 
3678         params = kzalloc(sizeof(*params), GFP_KERNEL);
3679         if (!params) {
3680                 BT_ERR("Out of memory");
3681                 return NULL;
3682         }
3683 
3684         bacpy(&params->addr, addr);
3685         params->addr_type = addr_type;
3686 
3687         list_add(&params->list, &hdev->le_conn_params);
3688         INIT_LIST_HEAD(&params->action);
3689 
3690         params->conn_min_interval = hdev->le_conn_min_interval;
3691         params->conn_max_interval = hdev->le_conn_max_interval;
3692         params->conn_latency = hdev->le_conn_latency;
3693         params->supervision_timeout = hdev->le_supv_timeout;
3694         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3695 
3696         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3697 
3698         return params;
3699 }
3700 
3701 /* This function requires the caller holds hdev->lock */
3702 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3703                         u8 auto_connect)
3704 {
3705         struct hci_conn_params *params;
3706 
3707         params = hci_conn_params_add(hdev, addr, addr_type);
3708         if (!params)
3709                 return -EIO;
3710 
3711         if (params->auto_connect == auto_connect)
3712                 return 0;
3713 
3714         list_del_init(&params->action);
3715 
3716         switch (auto_connect) {
3717         case HCI_AUTO_CONN_DISABLED:
3718         case HCI_AUTO_CONN_LINK_LOSS:
3719                 hci_update_background_scan(hdev);
3720                 break;
3721         case HCI_AUTO_CONN_REPORT:
3722                 list_add(&params->action, &hdev->pend_le_reports);
3723                 hci_update_background_scan(hdev);
3724                 break;
3725         case HCI_AUTO_CONN_DIRECT:
3726         case HCI_AUTO_CONN_ALWAYS:
3727                 if (!is_connected(hdev, addr, addr_type)) {
3728                         list_add(&params->action, &hdev->pend_le_conns);
3729                         hci_update_background_scan(hdev);
3730                 }
3731                 break;
3732         }
3733 
3734         params->auto_connect = auto_connect;
3735 
3736         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3737                auto_connect);
3738 
3739         return 0;
3740 }
3741 
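/* Usage sketch (illustrative, not part of the original file): request
 * that the core reconnects automatically whenever the given LE device
 * starts advertising. Caller must hold hdev->lock.
 */
static int __maybe_unused example_auto_connect(struct hci_dev *hdev,
                                               bdaddr_t *addr)
{
        return hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
                                   HCI_AUTO_CONN_ALWAYS);
}
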
3742 static void hci_conn_params_free(struct hci_conn_params *params)
3743 {
3744         if (params->conn) {
3745                 hci_conn_drop(params->conn);
3746                 hci_conn_put(params->conn);
3747         }
3748 
3749         list_del(&params->action);
3750         list_del(&params->list);
3751         kfree(params);
3752 }
3753 
3754 /* This function requires the caller holds hdev->lock */
3755 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3756 {
3757         struct hci_conn_params *params;
3758 
3759         params = hci_conn_params_lookup(hdev, addr, addr_type);
3760         if (!params)
3761                 return;
3762 
3763         hci_conn_params_free(params);
3764 
3765         hci_update_background_scan(hdev);
3766 
3767         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3768 }
3769 
3770 /* This function requires the caller holds hdev->lock */
3771 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3772 {
3773         struct hci_conn_params *params, *tmp;
3774 
3775         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3776                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3777                         continue;
3778                 list_del(&params->list);
3779                 kfree(params);
3780         }
3781 
3782         BT_DBG("All disabled LE connection parameters were removed");
3783 }
3784 
3785 /* This function requires the caller holds hdev->lock */
3786 void hci_conn_params_clear_all(struct hci_dev *hdev)
3787 {
3788         struct hci_conn_params *params, *tmp;
3789 
3790         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3791                 hci_conn_params_free(params);
3792 
3793         hci_update_background_scan(hdev);
3794 
3795         BT_DBG("All LE connection parameters were removed");
3796 }
3797 
3798 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3799 {
3800         if (status) {
3801                 BT_ERR("Failed to start inquiry: status %d", status);
3802 
3803                 hci_dev_lock(hdev);
3804                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3805                 hci_dev_unlock(hdev);
3806                 return;
3807         }
3808 }
3809 
3810 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3811 {
3812         /* General inquiry access code (GIAC) */
3813         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3814         struct hci_request req;
3815         struct hci_cp_inquiry cp;
3816         int err;
3817 
3818         if (status) {
3819                 BT_ERR("Failed to disable LE scanning: status %d", status);
3820                 return;
3821         }
3822 
3823         switch (hdev->discovery.type) {
3824         case DISCOV_TYPE_LE:
3825                 hci_dev_lock(hdev);
3826                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3827                 hci_dev_unlock(hdev);
3828                 break;
3829 
3830         case DISCOV_TYPE_INTERLEAVED:
3831                 hci_req_init(&req, hdev);
3832 
3833                 memset(&cp, 0, sizeof(cp));
3834                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3835                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3836                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3837 
3838                 hci_dev_lock(hdev);
3839 
3840                 hci_inquiry_cache_flush(hdev);
3841 
3842                 err = hci_req_run(&req, inquiry_complete);
3843                 if (err) {
3844                         BT_ERR("Inquiry request failed: err %d", err);
3845                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3846                 }
3847 
3848                 hci_dev_unlock(hdev);
3849                 break;
3850         }
3851 }
3852 
3853 static void le_scan_disable_work(struct work_struct *work)
3854 {
3855         struct hci_dev *hdev = container_of(work, struct hci_dev,
3856                                             le_scan_disable.work);
3857         struct hci_request req;
3858         int err;
3859 
3860         BT_DBG("%s", hdev->name);
3861 
3862         hci_req_init(&req, hdev);
3863 
3864         hci_req_add_le_scan_disable(&req);
3865 
3866         err = hci_req_run(&req, le_scan_disable_work_complete);
3867         if (err)
3868                 BT_ERR("Disable LE scanning request failed: err %d", err);
3869 }
3870 
3871 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3872 {
3873         struct hci_dev *hdev = req->hdev;
3874 
3875         /* If we're advertising or initiating an LE connection we can't
3876          * go ahead and change the random address at this time. This is
3877          * because the eventual initiator address used for the
3878          * subsequently created connection will be undefined (some
3879          * controllers use the new address and others the one we had
3880          * when the operation started).
3881          *
3882          * In this kind of scenario skip the update and let the random
3883          * address be updated at the next cycle.
3884          */
3885         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3886             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3887                 BT_DBG("Deferring random address update");
3888                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3889                 return;
3890         }
3891 
3892         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3893 }
3894 
3895 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3896                               u8 *own_addr_type)
3897 {
3898         struct hci_dev *hdev = req->hdev;
3899         int err;
3900 
3901         /* If privacy is enabled, use a resolvable private address. If
3902          * the current RPA has expired, or something other than the
3903          * current RPA is in use, generate a new one.
3904          */
3905         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3906                 int to;
3907 
3908                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3909 
3910                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3911                     !bacmp(&hdev->random_addr, &hdev->rpa))
3912                         return 0;
3913 
3914                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3915                 if (err < 0) {
3916                         BT_ERR("%s failed to generate new RPA", hdev->name);
3917                         return err;
3918                 }
3919 
3920                 set_random_addr(req, &hdev->rpa);
3921 
3922                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3923                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3924 
3925                 return 0;
3926         }
3927 
3928         /* If privacy is required but a resolvable private address is not
3929          * available, use a non-resolvable private address. This is useful
3930          * for active scanning and non-connectable advertising.
3931          */
3932         if (require_privacy) {
3933                 bdaddr_t urpa;
3934 
3935                 get_random_bytes(&urpa, 6);
3936                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3937 
3938                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3939                 set_random_addr(req, &urpa);
3940                 return 0;
3941         }
3942 
3943         /* If forcing the static address is in effect, or there is no
3944          * public address, use the static address as the random address
3945          * (but skip the HCI command if the current random address is
3946          * already the static one).
3947          */
3948         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3949             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3950                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3951                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3952                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3953                                     &hdev->static_addr);
3954                 return 0;
3955         }
3956 
3957         /* Neither privacy nor static address is being used so use a
3958          * public address.
3959          */
3960         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3961 
3962         return 0;
3963 }
3964 
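/* Usage sketch (illustrative, not part of the original file): picking
 * the own address type while building an LE request. Passing true for
 * require_privacy guarantees a non-identity address even when the
 * HCI_PRIVACY flag is not set.
 */
static void __maybe_unused example_own_address(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 own_addr_type;

        hci_req_init(&req, hdev);

        if (hci_update_random_address(&req, true, &own_addr_type) < 0)
                return;

        /* own_addr_type is now set and, if needed, an
         * HCI_OP_LE_SET_RANDOM_ADDR command is queued on the request.
         */
}
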
3965 /* Copy the Identity Address of the controller.
3966  *
3967  * If the controller has a public BD_ADDR, then by default use that one.
3968  * If this is an LE-only controller without a public address, default to
3969  * the static random address.
3970  *
3971  * For debugging purposes it is possible to force controllers with a
3972  * public address to use the static random address instead.
3973  */
3974 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3975                                u8 *bdaddr_type)
3976 {
3977         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3978             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3979                 bacpy(bdaddr, &hdev->static_addr);
3980                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3981         } else {
3982                 bacpy(bdaddr, &hdev->bdaddr);
3983                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3984         }
3985 }
3986 
3987 /* Alloc HCI device */
3988 struct hci_dev *hci_alloc_dev(void)
3989 {
3990         struct hci_dev *hdev;
3991 
3992         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3993         if (!hdev)
3994                 return NULL;
3995 
3996         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3997         hdev->esco_type = (ESCO_HV1);
3998         hdev->link_mode = (HCI_LM_ACCEPT);
3999         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
4000         hdev->io_capability = 0x03;     /* No Input No Output */
4001         hdev->manufacturer = 0xffff;    /* Default to internal use */
4002         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4003         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4004 
4005         hdev->sniff_max_interval = 800;
4006         hdev->sniff_min_interval = 80;
4007 
4008         hdev->le_adv_channel_map = 0x07;
4009         hdev->le_adv_min_interval = 0x0800;
4010         hdev->le_adv_max_interval = 0x0800;
4011         hdev->le_scan_interval = 0x0060;
4012         hdev->le_scan_window = 0x0030;
4013         hdev->le_conn_min_interval = 0x0028;
4014         hdev->le_conn_max_interval = 0x0038;
4015         hdev->le_conn_latency = 0x0000;
4016         hdev->le_supv_timeout = 0x002a;
4017 
4018         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4019         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4020         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4021         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4022 
4023         mutex_init(&hdev->lock);
4024         mutex_init(&hdev->req_lock);
4025 
4026         INIT_LIST_HEAD(&hdev->mgmt_pending);
4027         INIT_LIST_HEAD(&hdev->blacklist);
4028         INIT_LIST_HEAD(&hdev->whitelist);
4029         INIT_LIST_HEAD(&hdev->uuids);
4030         INIT_LIST_HEAD(&hdev->link_keys);
4031         INIT_LIST_HEAD(&hdev->long_term_keys);
4032         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4033         INIT_LIST_HEAD(&hdev->remote_oob_data);
4034         INIT_LIST_HEAD(&hdev->le_white_list);
4035         INIT_LIST_HEAD(&hdev->le_conn_params);
4036         INIT_LIST_HEAD(&hdev->pend_le_conns);
4037         INIT_LIST_HEAD(&hdev->pend_le_reports);
4038         INIT_LIST_HEAD(&hdev->conn_hash.list);
4039 
4040         INIT_WORK(&hdev->rx_work, hci_rx_work);
4041         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4042         INIT_WORK(&hdev->tx_work, hci_tx_work);
4043         INIT_WORK(&hdev->power_on, hci_power_on);
4044 
4045         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4046         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4047         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4048 
4049         skb_queue_head_init(&hdev->rx_q);
4050         skb_queue_head_init(&hdev->cmd_q);
4051         skb_queue_head_init(&hdev->raw_q);
4052 
4053         init_waitqueue_head(&hdev->req_wait_q);
4054 
4055         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4056 
4057         hci_init_sysfs(hdev);
4058         discovery_init(hdev);
4059 
4060         return hdev;
4061 }
4062 EXPORT_SYMBOL(hci_alloc_dev);
4063 
4064 /* Free HCI device */
4065 void hci_free_dev(struct hci_dev *hdev)
4066 {
4067         /* will free via device release */
4068         put_device(&hdev->dev);
4069 }
4070 EXPORT_SYMBOL(hci_free_dev);
4071 
4072 /* Register HCI device */
4073 int hci_register_dev(struct hci_dev *hdev)
4074 {
4075         int id, error;
4076 
4077         if (!hdev->open || !hdev->close || !hdev->send)
4078                 return -EINVAL;
4079 
4080         /* Do not allow HCI_AMP devices to register at index 0,
4081          * so the index can be used as the AMP controller ID.
4082          */
4083         switch (hdev->dev_type) {
4084         case HCI_BREDR:
4085                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4086                 break;
4087         case HCI_AMP:
4088                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4089                 break;
4090         default:
4091                 return -EINVAL;
4092         }
4093 
4094         if (id < 0)
4095                 return id;
4096 
4097         sprintf(hdev->name, "hci%d", id);
4098         hdev->id = id;
4099 
4100         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4101 
4102         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4103                                           WQ_MEM_RECLAIM, 1, hdev->name);
4104         if (!hdev->workqueue) {
4105                 error = -ENOMEM;
4106                 goto err;
4107         }
4108 
4109         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4110                                               WQ_MEM_RECLAIM, 1, hdev->name);
4111         if (!hdev->req_workqueue) {
4112                 destroy_workqueue(hdev->workqueue);
4113                 error = -ENOMEM;
4114                 goto err;
4115         }
4116 
4117         if (!IS_ERR_OR_NULL(bt_debugfs))
4118                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4119 
4120         dev_set_name(&hdev->dev, "%s", hdev->name);
4121 
4122         error = device_add(&hdev->dev);
4123         if (error < 0)
4124                 goto err_wqueue;
4125 
4126         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4127                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4128                                     hdev);
4129         if (hdev->rfkill) {
4130                 if (rfkill_register(hdev->rfkill) < 0) {
4131                         rfkill_destroy(hdev->rfkill);
4132                         hdev->rfkill = NULL;
4133                 }
4134         }
4135 
4136         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4137                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4138 
4139         set_bit(HCI_SETUP, &hdev->dev_flags);
4140         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4141 
4142         if (hdev->dev_type == HCI_BREDR) {
4143                 /* Assume BR/EDR support until proven otherwise (such as
4144                  * through reading the supported features during init).
4145                  */
4146                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4147         }
4148 
4149         write_lock(&hci_dev_list_lock);
4150         list_add(&hdev->list, &hci_dev_list);
4151         write_unlock(&hci_dev_list_lock);
4152 
4153         /* Devices that are marked for raw-only usage are unconfigured
4154          * and should not be included in normal operation.
4155          */
4156         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4157                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4158 
4159         hci_notify(hdev, HCI_DEV_REG);
4160         hci_dev_hold(hdev);
4161 
4162         queue_work(hdev->req_workqueue, &hdev->power_on);
4163 
4164         return id;
4165 
4166 err_wqueue:
4167         destroy_workqueue(hdev->workqueue);
4168         destroy_workqueue(hdev->req_workqueue);
4169 err:
4170         ida_simple_remove(&hci_index_ida, hdev->id);
4171 
4172         return error;
4173 }
4174 EXPORT_SYMBOL(hci_register_dev);
4175 
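/* Driver-side sketch (illustrative, not part of the original file): the
 * minimal alloc/register sequence a transport driver follows. The open,
 * close and send callbacks here are hypothetical stubs.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb); /* a real driver would hand this to hardware */
        return 0;
}

static int __maybe_unused example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_VIRTUAL;
        hdev->open  = example_open;
        hdev->close = example_close;
        hdev->send  = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err < 0 ? err : 0;
}
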
4176 /* Unregister HCI device */
4177 void hci_unregister_dev(struct hci_dev *hdev)
4178 {
4179         int i, id;
4180 
4181         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4182 
4183         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4184 
4185         id = hdev->id;
4186 
4187         write_lock(&hci_dev_list_lock);
4188         list_del(&hdev->list);
4189         write_unlock(&hci_dev_list_lock);
4190 
4191         hci_dev_do_close(hdev);
4192 
4193         for (i = 0; i < NUM_REASSEMBLY; i++)
4194                 kfree_skb(hdev->reassembly[i]);
4195 
4196         cancel_work_sync(&hdev->power_on);
4197 
4198         if (!test_bit(HCI_INIT, &hdev->flags) &&
4199             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4200             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4201                 hci_dev_lock(hdev);
4202                 mgmt_index_removed(hdev);
4203                 hci_dev_unlock(hdev);
4204         }
4205 
4206         /* mgmt_index_removed should take care of emptying the
4207          * pending list */
4208         BUG_ON(!list_empty(&hdev->mgmt_pending));
4209 
4210         hci_notify(hdev, HCI_DEV_UNREG);
4211 
4212         if (hdev->rfkill) {
4213                 rfkill_unregister(hdev->rfkill);
4214                 rfkill_destroy(hdev->rfkill);
4215         }
4216 
4217         smp_unregister(hdev);
4218 
4219         device_del(&hdev->dev);
4220 
4221         debugfs_remove_recursive(hdev->debugfs);
4222 
4223         destroy_workqueue(hdev->workqueue);
4224         destroy_workqueue(hdev->req_workqueue);
4225 
4226         hci_dev_lock(hdev);
4227         hci_bdaddr_list_clear(&hdev->blacklist);
4228         hci_bdaddr_list_clear(&hdev->whitelist);
4229         hci_uuids_clear(hdev);
4230         hci_link_keys_clear(hdev);
4231         hci_smp_ltks_clear(hdev);
4232         hci_smp_irks_clear(hdev);
4233         hci_remote_oob_data_clear(hdev);
4234         hci_bdaddr_list_clear(&hdev->le_white_list);
4235         hci_conn_params_clear_all(hdev);
4236         hci_dev_unlock(hdev);
4237 
4238         hci_dev_put(hdev);
4239 
4240         ida_simple_remove(&hci_index_ida, id);
4241 }
4242 EXPORT_SYMBOL(hci_unregister_dev);
4243 
4244 /* Suspend HCI device */
4245 int hci_suspend_dev(struct hci_dev *hdev)
4246 {
4247         hci_notify(hdev, HCI_DEV_SUSPEND);
4248         return 0;
4249 }
4250 EXPORT_SYMBOL(hci_suspend_dev);
4251 
4252 /* Resume HCI device */
4253 int hci_resume_dev(struct hci_dev *hdev)
4254 {
4255         hci_notify(hdev, HCI_DEV_RESUME);
4256         return 0;
4257 }
4258 EXPORT_SYMBOL(hci_resume_dev);
4259 
4260 /* Receive frame from HCI drivers */
4261 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4262 {
4263         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4264                       && !test_bit(HCI_INIT, &hdev->flags))) {
4265                 kfree_skb(skb);
4266                 return -ENXIO;
4267         }
4268 
4269         /* Incoming skb */
4270         bt_cb(skb)->incoming = 1;
4271 
4272         /* Time stamp */
4273         __net_timestamp(skb);
4274 
4275         skb_queue_tail(&hdev->rx_q, skb);
4276         queue_work(hdev->workqueue, &hdev->rx_work);
4277 
4278         return 0;
4279 }
4280 EXPORT_SYMBOL(hci_recv_frame);
4281 
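/* Driver-side sketch (illustrative): handing a received event packet to
 * the core. The packet type must be set before queueing; on error the
 * skb has already been freed by hci_recv_frame().
 */
static int __maybe_unused example_rx(struct hci_dev *hdev,
                                     struct sk_buff *skb)
{
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        return hci_recv_frame(hdev, skb);
}
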
4282 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4283                           int count, __u8 index)
4284 {
4285         int len = 0;
4286         int hlen = 0;
4287         int remain = count;
4288         struct sk_buff *skb;
4289         struct bt_skb_cb *scb;
4290 
4291         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4292             index >= NUM_REASSEMBLY)
4293                 return -EILSEQ;
4294 
4295         skb = hdev->reassembly[index];
4296 
4297         if (!skb) {
4298                 switch (type) {
4299                 case HCI_ACLDATA_PKT:
4300                         len = HCI_MAX_FRAME_SIZE;
4301                         hlen = HCI_ACL_HDR_SIZE;
4302                         break;
4303                 case HCI_EVENT_PKT:
4304                         len = HCI_MAX_EVENT_SIZE;
4305                         hlen = HCI_EVENT_HDR_SIZE;
4306                         break;
4307                 case HCI_SCODATA_PKT:
4308                         len = HCI_MAX_SCO_SIZE;
4309                         hlen = HCI_SCO_HDR_SIZE;
4310                         break;
4311                 }
4312 
4313                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4314                 if (!skb)
4315                         return -ENOMEM;
4316 
4317                 scb = (void *) skb->cb;
4318                 scb->expect = hlen;
4319                 scb->pkt_type = type;
4320 
4321                 hdev->reassembly[index] = skb;
4322         }
4323 
4324         while (count) {
4325                 scb = (void *) skb->cb;
4326                 len = min_t(uint, scb->expect, count);
4327 
4328                 memcpy(skb_put(skb, len), data, len);
4329 
4330                 count -= len;
4331                 data += len;
4332                 scb->expect -= len;
4333                 remain = count;
4334 
4335                 switch (type) {
4336                 case HCI_EVENT_PKT:
4337                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4338                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4339                                 scb->expect = h->plen;
4340 
4341                                 if (skb_tailroom(skb) < scb->expect) {
4342                                         kfree_skb(skb);
4343                                         hdev->reassembly[index] = NULL;
4344                                         return -ENOMEM;
4345                                 }
4346                         }
4347                         break;
4348 
4349                 case HCI_ACLDATA_PKT:
4350                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4351                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4352                                 scb->expect = __le16_to_cpu(h->dlen);
4353 
4354                                 if (skb_tailroom(skb) < scb->expect) {
4355                                         kfree_skb(skb);
4356                                         hdev->reassembly[index] = NULL;
4357                                         return -ENOMEM;
4358                                 }
4359                         }
4360                         break;
4361 
4362                 case HCI_SCODATA_PKT:
4363                         if (skb->len == HCI_SCO_HDR_SIZE) {
4364                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4365                                 scb->expect = h->dlen;
4366 
4367                                 if (skb_tailroom(skb) < scb->expect) {
4368                                         kfree_skb(skb);
4369                                         hdev->reassembly[index] = NULL;
4370                                         return -ENOMEM;
4371                                 }
4372                         }
4373                         break;
4374                 }
4375 
4376                 if (scb->expect == 0) {
4377                         /* Complete frame */
4378 
4379                         bt_cb(skb)->pkt_type = type;
4380                         hci_recv_frame(hdev, skb);
4381 
4382                         hdev->reassembly[index] = NULL;
4383                         return remain;
4384                 }
4385         }
4386 
4387         return remain;
4388 }
4389 
4390 #define STREAM_REASSEMBLY 0
4391 
4392 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4393 {
4394         int type;
4395         int rem = 0;
4396 
4397         while (count) {
4398                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4399 
4400                 if (!skb) {
4401                         struct { char type; } *pkt;
4402 
4403                         /* Start of the frame */
4404                         pkt = data;
4405                         type = pkt->type;
4406 
4407                         data++;
4408                         count--;
4409                 } else
4410                         type = bt_cb(skb)->pkt_type;
4411 
4412                 rem = hci_reassembly(hdev, type, data, count,
4413                                      STREAM_REASSEMBLY);
4414                 if (rem < 0)
4415                         return rem;
4416 
4417                 data += (count - rem);
4418                 count = rem;
4419         }
4420 
4421         return rem;
4422 }
4423 EXPORT_SYMBOL(hci_recv_stream_fragment);
4424 
4425 /* ---- Interface to upper protocols ---- */
4426 
4427 int hci_register_cb(struct hci_cb *cb)
4428 {
4429         BT_DBG("%p name %s", cb, cb->name);
4430 
4431         write_lock(&hci_cb_list_lock);
4432         list_add(&cb->list, &hci_cb_list);
4433         write_unlock(&hci_cb_list_lock);
4434 
4435         return 0;
4436 }
4437 EXPORT_SYMBOL(hci_register_cb);
4438 
4439 int hci_unregister_cb(struct hci_cb *cb)
4440 {
4441         BT_DBG("%p name %s", cb, cb->name);
4442 
4443         write_lock(&hci_cb_list_lock);
4444         list_del(&cb->list);
4445         write_unlock(&hci_cb_list_lock);
4446 
4447         return 0;
4448 }
4449 EXPORT_SYMBOL(hci_unregister_cb);
4450 
4451 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4452 {
4453         int err;
4454 
4455         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4456 
4457         /* Time stamp */
4458         __net_timestamp(skb);
4459 
4460         /* Send copy to monitor */
4461         hci_send_to_monitor(hdev, skb);
4462 
4463         if (atomic_read(&hdev->promisc)) {
4464                 /* Send copy to the sockets */
4465                 hci_send_to_sock(hdev, skb);
4466         }
4467 
4468         /* Get rid of skb owner, prior to sending to the driver. */
4469         skb_orphan(skb);
4470 
4471         err = hdev->send(hdev, skb);
4472         if (err < 0) {
4473                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4474                 kfree_skb(skb);
4475         }
4476 }
4477 
4478 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4479 {
4480         skb_queue_head_init(&req->cmd_q);
4481         req->hdev = hdev;
4482         req->err = 0;
4483 }
4484 
4485 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4486 {
4487         struct hci_dev *hdev = req->hdev;
4488         struct sk_buff *skb;
4489         unsigned long flags;
4490 
4491         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4492 
4493         /* If an error occurred during request building, remove all HCI
4494          * commands queued on the HCI request queue.
4495          */
4496         if (req->err) {
4497                 skb_queue_purge(&req->cmd_q);
4498                 return req->err;
4499         }
4500 
4501         /* Do not allow empty requests */
4502         if (skb_queue_empty(&req->cmd_q))
4503                 return -ENODATA;
4504 
4505         skb = skb_peek_tail(&req->cmd_q);
4506         bt_cb(skb)->req.complete = complete;
4507 
4508         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4509         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4510         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4511 
4512         queue_work(hdev->workqueue, &hdev->cmd_work);
4513 
4514         return 0;
4515 }
4516 
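/* Usage sketch (illustrative, not part of the original file): batching
 * two commands into a single request with one completion callback.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static void __maybe_unused example_request(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        if (hci_req_run(&req, example_req_complete))
                BT_ERR("%s example request failed", hdev->name);
}
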
4517 bool hci_req_pending(struct hci_dev *hdev)
4518 {
4519         return (hdev->req_status == HCI_REQ_PEND);
4520 }
4521 
4522 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4523                                        u32 plen, const void *param)
4524 {
4525         int len = HCI_COMMAND_HDR_SIZE + plen;
4526         struct hci_command_hdr *hdr;
4527         struct sk_buff *skb;
4528 
4529         skb = bt_skb_alloc(len, GFP_ATOMIC);
4530         if (!skb)
4531                 return NULL;
4532 
4533         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4534         hdr->opcode = cpu_to_le16(opcode);
4535         hdr->plen   = plen;
4536 
4537         if (plen)
4538                 memcpy(skb_put(skb, plen), param, plen);
4539 
4540         BT_DBG("skb len %d", skb->len);
4541 
4542         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4543         bt_cb(skb)->opcode = opcode;
4544 
4545         return skb;
4546 }
4547 
4548 /* Send HCI command */
4549 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4550                  const void *param)
4551 {
4552         struct sk_buff *skb;
4553 
4554         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4555 
4556         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4557         if (!skb) {
4558                 BT_ERR("%s no memory for command", hdev->name);
4559                 return -ENOMEM;
4560         }
4561 
4562         /* Stand-alone HCI commands must be flagged as
4563          * single-command requests.
4564          */
4565         bt_cb(skb)->req.start = true;
4566 
4567         skb_queue_tail(&hdev->cmd_q, skb);
4568         queue_work(hdev->workqueue, &hdev->cmd_work);
4569 
4570         return 0;
4571 }
4572 
4573 /* Queue a command to an asynchronous HCI request */
4574 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4575                     const void *param, u8 event)
4576 {
4577         struct hci_dev *hdev = req->hdev;
4578         struct sk_buff *skb;
4579 
4580         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4581 
4582         /* If an error occurred during request building, there is no point in
4583          * queueing the HCI command. We can simply return.
4584          */
4585         if (req->err)
4586                 return;
4587 
4588         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4589         if (!skb) {
4590                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4591                        hdev->name, opcode);
4592                 req->err = -ENOMEM;
4593                 return;
4594         }
4595 
4596         if (skb_queue_empty(&req->cmd_q))
4597                 bt_cb(skb)->req.start = true;
4598 
4599         bt_cb(skb)->req.event = event;
4600 
4601         skb_queue_tail(&req->cmd_q, skb);
4602 }
4603 
4604 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4605                  const void *param)
4606 {
4607         hci_req_add_ev(req, opcode, plen, param, 0);
4608 }
4609 
4610 /* Get data from the previously sent command */
4611 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4612 {
4613         struct hci_command_hdr *hdr;
4614 
4615         if (!hdev->sent_cmd)
4616                 return NULL;
4617 
4618         hdr = (void *) hdev->sent_cmd->data;
4619 
4620         if (hdr->opcode != cpu_to_le16(opcode))
4621                 return NULL;
4622 
4623         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4624 
4625         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4626 }
4627 
4628 /* Send ACL data */
4629 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4630 {
4631         struct hci_acl_hdr *hdr;
4632         int len = skb->len;
4633 
4634         skb_push(skb, HCI_ACL_HDR_SIZE);
4635         skb_reset_transport_header(skb);
4636         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4637         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4638         hdr->dlen   = cpu_to_le16(len);
4639 }
4640 
4641 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4642                           struct sk_buff *skb, __u16 flags)
4643 {
4644         struct hci_conn *conn = chan->conn;
4645         struct hci_dev *hdev = conn->hdev;
4646         struct sk_buff *list;
4647 
4648         skb->len = skb_headlen(skb);
4649         skb->data_len = 0;
4650 
4651         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4652 
4653         switch (hdev->dev_type) {
4654         case HCI_BREDR:
4655                 hci_add_acl_hdr(skb, conn->handle, flags);
4656                 break;
4657         case HCI_AMP:
4658                 hci_add_acl_hdr(skb, chan->handle, flags);
4659                 break;
4660         default:
4661                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4662                 return;
4663         }
4664 
4665         list = skb_shinfo(skb)->frag_list;
4666         if (!list) {
4667                 /* Non-fragmented */
4668                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4669 
4670                 skb_queue_tail(queue, skb);
4671         } else {
4672                 /* Fragmented */
4673                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4674 
4675                 skb_shinfo(skb)->frag_list = NULL;
4676 
4677                 /* Queue all fragments atomically */
4678                 spin_lock(&queue->lock);
4679 
4680                 __skb_queue_tail(queue, skb);
4681 
4682                 flags &= ~ACL_START;
4683                 flags |= ACL_CONT;
4684                 do {
4685                         skb = list; list = list->next;
4686 
4687                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4688                         hci_add_acl_hdr(skb, conn->handle, flags);
4689 
4690                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4691 
4692                         __skb_queue_tail(queue, skb);
4693                 } while (list);
4694 
4695                 spin_unlock(&queue->lock);
4696         }
4697 }
4698 
4699 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4700 {
4701         struct hci_dev *hdev = chan->conn->hdev;
4702 
4703         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4704 
4705         hci_queue_acl(chan, &chan->data_q, skb, flags);
4706 
4707         queue_work(hdev->workqueue, &hdev->tx_work);
4708 }
4709 
4710 /* Send SCO data */
4711 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4712 {
4713         struct hci_dev *hdev = conn->hdev;
4714         struct hci_sco_hdr hdr;
4715 
4716         BT_DBG("%s len %d", hdev->name, skb->len);
4717 
4718         hdr.handle = cpu_to_le16(conn->handle);
4719         hdr.dlen   = skb->len;
4720 
4721         skb_push(skb, HCI_SCO_HDR_SIZE);
4722         skb_reset_transport_header(skb);
4723         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4724 
4725         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4726 
4727         skb_queue_tail(&conn->data_q, skb);
4728         queue_work(hdev->workqueue, &hdev->tx_work);
4729 }
4730 
4731 /* ---- HCI TX task (outgoing data) ---- */
4732 
4733 /* HCI Connection scheduler */
4734 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4735                                      int *quote)
4736 {
4737         struct hci_conn_hash *h = &hdev->conn_hash;
4738         struct hci_conn *conn = NULL, *c;
4739         unsigned int num = 0, min = ~0;
4740 
4741         /* We don't have to lock the device here. Connections are always
4742          * added and removed with the TX task disabled. */
4743 
4744         rcu_read_lock();
4745 
4746         list_for_each_entry_rcu(c, &h->list, list) {
4747                 if (c->type != type || skb_queue_empty(&c->data_q))
4748                         continue;
4749 
4750                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4751                         continue;
4752 
4753                 num++;
4754 
4755                 if (c->sent < min) {
4756                         min  = c->sent;
4757                         conn = c;
4758                 }
4759 
4760                 if (hci_conn_num(hdev, type) == num)
4761                         break;
4762         }
4763 
4764         rcu_read_unlock();
4765 
4766         if (conn) {
4767                 int cnt, q;
4768 
4769                 switch (conn->type) {
4770                 case ACL_LINK:
4771                         cnt = hdev->acl_cnt;
4772                         break;
4773                 case SCO_LINK:
4774                 case ESCO_LINK:
4775                         cnt = hdev->sco_cnt;
4776                         break;
4777                 case LE_LINK:
4778                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4779                         break;
4780                 default:
4781                         cnt = 0;
4782                         BT_ERR("Unknown link type");
4783                 }
4784 
4785                 q = cnt / num;
4786                 *quote = q ? q : 1;
4787         } else
4788                 *quote = 0;
4789 
4790         BT_DBG("conn %p quote %d", conn, *quote);
4791         return conn;
4792 }
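
/* [Editorial sketch, not part of hci_core.c] The quote computed above is
 * a fair share of the controller's free packet credits: credits divided
 * by the number of active connections of that link type, but never less
 * than one, so the least-busy connection always makes progress:
 */
static int fair_quote(int free_credits, int active_conns)
{
	int q = free_credits / active_conns;

	return q ? q : 1;
}
/* fair_quote(10, 3) == 3; fair_quote(1, 4) == 1 -- never starved to 0 */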
4793 
4794 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4795 {
4796         struct hci_conn_hash *h = &hdev->conn_hash;
4797         struct hci_conn *c;
4798 
4799         BT_ERR("%s link tx timeout", hdev->name);
4800 
4801         rcu_read_lock();
4802 
4803         /* Kill stalled connections */
4804         list_for_each_entry_rcu(c, &h->list, list) {
4805                 if (c->type == type && c->sent) {
4806                         BT_ERR("%s killing stalled connection %pMR",
4807                                hdev->name, &c->dst);
4808                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4809                 }
4810         }
4811 
4812         rcu_read_unlock();
4813 }
4814 
4815 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4816                                       int *quote)
4817 {
4818         struct hci_conn_hash *h = &hdev->conn_hash;
4819         struct hci_chan *chan = NULL;
4820         unsigned int num = 0, min = ~0, cur_prio = 0;
4821         struct hci_conn *conn;
4822         int cnt, q, conn_num = 0;
4823 
4824         BT_DBG("%s", hdev->name);
4825 
4826         rcu_read_lock();
4827 
4828         list_for_each_entry_rcu(conn, &h->list, list) {
4829                 struct hci_chan *tmp;
4830 
4831                 if (conn->type != type)
4832                         continue;
4833 
4834                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4835                         continue;
4836 
4837                 conn_num++;
4838 
4839                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4840                         struct sk_buff *skb;
4841 
4842                         if (skb_queue_empty(&tmp->data_q))
4843                                 continue;
4844 
4845                         skb = skb_peek(&tmp->data_q);
4846                         if (skb->priority < cur_prio)
4847                                 continue;
4848 
4849                         if (skb->priority > cur_prio) {
4850                                 num = 0;
4851                                 min = ~0;
4852                                 cur_prio = skb->priority;
4853                         }
4854 
4855                         num++;
4856 
4857                         if (conn->sent < min) {
4858                                 min  = conn->sent;
4859                                 chan = tmp;
4860                         }
4861                 }
4862 
4863                 if (hci_conn_num(hdev, type) == conn_num)
4864                         break;
4865         }
4866 
4867         rcu_read_unlock();
4868 
4869         if (!chan)
4870                 return NULL;
4871 
4872         switch (chan->conn->type) {
4873         case ACL_LINK:
4874                 cnt = hdev->acl_cnt;
4875                 break;
4876         case AMP_LINK:
4877                 cnt = hdev->block_cnt;
4878                 break;
4879         case SCO_LINK:
4880         case ESCO_LINK:
4881                 cnt = hdev->sco_cnt;
4882                 break;
4883         case LE_LINK:
4884                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4885                 break;
4886         default:
4887                 cnt = 0;
4888                 BT_ERR("Unknown link type");
4889         }
4890 
4891         q = cnt / num;
4892         *quote = q ? q : 1;
4893         BT_DBG("chan %p quote %d", chan, *quote);
4894         return chan;
4895 }
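
/* [Editorial sketch, not part of hci_core.c] hci_chan_sent() implements
 * strict priority with round-robin inside a priority class: a channel
 * whose head skb has a higher priority resets the search, and ties are
 * broken by the lowest per-connection 'sent' count. The same selection
 * over a plain array:
 */
struct chan_model {
	unsigned int head_prio;	/* priority of the skb at the queue head */
	unsigned int sent;	/* packets already sent on this connection */
};

static int pick_chan(const struct chan_model *c, int n)
{
	unsigned int cur_prio = 0, min = ~0U;
	int best = -1, i;

	for (i = 0; i < n; i++) {
		if (c[i].head_prio < cur_prio)
			continue;	/* lower class: ignore */
		if (c[i].head_prio > cur_prio) {
			cur_prio = c[i].head_prio;
			min = ~0U;	/* higher class: restart tie-break */
		}
		if (c[i].sent < min) {
			min = c[i].sent;
			best = i;
		}
	}
	return best;
}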
4896 
4897 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4898 {
4899         struct hci_conn_hash *h = &hdev->conn_hash;
4900         struct hci_conn *conn;
4901         int num = 0;
4902 
4903         BT_DBG("%s", hdev->name);
4904 
4905         rcu_read_lock();
4906 
4907         list_for_each_entry_rcu(conn, &h->list, list) {
4908                 struct hci_chan *chan;
4909 
4910                 if (conn->type != type)
4911                         continue;
4912 
4913                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4914                         continue;
4915 
4916                 num++;
4917 
4918                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4919                         struct sk_buff *skb;
4920 
4921                         if (chan->sent) {
4922                                 chan->sent = 0;
4923                                 continue;
4924                         }
4925 
4926                         if (skb_queue_empty(&chan->data_q))
4927                                 continue;
4928 
4929                         skb = skb_peek(&chan->data_q);
4930                         if (skb->priority >= HCI_PRIO_MAX - 1)
4931                                 continue;
4932 
4933                         skb->priority = HCI_PRIO_MAX - 1;
4934 
4935                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4936                                skb->priority);
4937                 }
4938 
4939                 if (hci_conn_num(hdev, type) == num)
4940                         break;
4941         }
4942 
4943         rcu_read_unlock();
4944 
4945 }
4946 
4947 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4948 {
4949         /* Calculate count of blocks used by this packet */
4950         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4951 }
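
/* [Editorial note, not part of hci_core.c] __get_blocks() is a
 * rounding-up division over the ACL payload (skb length minus the 4-byte
 * ACL header). For example, with 256-byte controller blocks:
 *
 *	DIV_ROUND_UP(1000 - 4, 256) = (996 + 255) / 256 = 4 blocks
 *
 * so a 1000-byte skb consumes 4 of hdev->block_cnt.
 */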
4952 
4953 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4954 {
4955         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4956                 /* ACL tx timeout must be longer than maximum
4957                  * link supervision timeout (40.9 seconds) */
4958                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4959                                        HCI_ACL_TX_TIMEOUT))
4960                         hci_link_tx_to(hdev, ACL_LINK);
4961         }
4962 }
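
/* [Editorial note, not part of hci_core.c] The 40.9 s figure in the
 * comment above is the largest encodable link supervision timeout: the
 * HCI field counts 0.625 ms slots in 16 bits, so
 *
 *	0xffff * 0.625 ms = 65535 * 0.625 ms ~= 40959 ms ~= 40.9 s
 *
 * which is why HCI_ACL_TX_TIMEOUT is 45 seconds, and why the LE path
 * below uses HZ * 45 -- comfortably past the worst-case supervision
 * timeout.
 */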
4963 
4964 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4965 {
4966         unsigned int cnt = hdev->acl_cnt;
4967         struct hci_chan *chan;
4968         struct sk_buff *skb;
4969         int quote;
4970 
4971         __check_timeout(hdev, cnt);
4972 
4973         while (hdev->acl_cnt &&
4974                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4975                 u32 priority = (skb_peek(&chan->data_q))->priority;
4976                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4977                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4978                                skb->len, skb->priority);
4979 
4980                         /* Stop if priority has changed */
4981                         if (skb->priority < priority)
4982                                 break;
4983 
4984                         skb = skb_dequeue(&chan->data_q);
4985 
4986                         hci_conn_enter_active_mode(chan->conn,
4987                                                    bt_cb(skb)->force_active);
4988 
4989                         hci_send_frame(hdev, skb);
4990                         hdev->acl_last_tx = jiffies;
4991 
4992                         hdev->acl_cnt--;
4993                         chan->sent++;
4994                         chan->conn->sent++;
4995                 }
4996         }
4997 
4998         if (cnt != hdev->acl_cnt)
4999                 hci_prio_recalculate(hdev, ACL_LINK);
5000 }
5001 
5002 static void hci_sched_acl_blk(struct hci_dev *hdev)
5003 {
5004         unsigned int cnt = hdev->block_cnt;
5005         struct hci_chan *chan;
5006         struct sk_buff *skb;
5007         int quote;
5008         u8 type;
5009 
5010         __check_timeout(hdev, cnt);
5011 
5012         BT_DBG("%s", hdev->name);
5013 
5014         if (hdev->dev_type == HCI_AMP)
5015                 type = AMP_LINK;
5016         else
5017                 type = ACL_LINK;
5018 
5019         while (hdev->block_cnt > 0 &&
5020                (chan = hci_chan_sent(hdev, type, &quote))) {
5021                 u32 priority = (skb_peek(&chan->data_q))->priority;
5022                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5023                         int blocks;
5024 
5025                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5026                                skb->len, skb->priority);
5027 
5028                         /* Stop if priority has changed */
5029                         if (skb->priority < priority)
5030                                 break;
5031 
5032                         skb = skb_dequeue(&chan->data_q);
5033 
5034                         blocks = __get_blocks(hdev, skb);
5035                         if (blocks > hdev->block_cnt)
5036                                 return;
5037 
5038                         hci_conn_enter_active_mode(chan->conn,
5039                                                    bt_cb(skb)->force_active);
5040 
5041                         hci_send_frame(hdev, skb);
5042                         hdev->acl_last_tx = jiffies;
5043 
5044                         hdev->block_cnt -= blocks;
5045                         quote -= blocks;
5046 
5047                         chan->sent += blocks;
5048                         chan->conn->sent += blocks;
5049                 }
5050         }
5051 
5052         if (cnt != hdev->block_cnt)
5053                 hci_prio_recalculate(hdev, type);
5054 }
5055 
5056 static void hci_sched_acl(struct hci_dev *hdev)
5057 {
5058         BT_DBG("%s", hdev->name);
5059 
5060         /* No ACL links on a BR/EDR controller, nothing to schedule */
5061         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5062                 return;
5063 
5064         /* No AMP links on an AMP controller, nothing to schedule */
5065         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5066                 return;
5067 
5068         switch (hdev->flow_ctl_mode) {
5069         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5070                 hci_sched_acl_pkt(hdev);
5071                 break;
5072 
5073         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5074                 hci_sched_acl_blk(hdev);
5075                 break;
5076         }
5077 }
5078 
5079 /* Schedule SCO */
5080 static void hci_sched_sco(struct hci_dev *hdev)
5081 {
5082         struct hci_conn *conn;
5083         struct sk_buff *skb;
5084         int quote;
5085 
5086         BT_DBG("%s", hdev->name);
5087 
5088         if (!hci_conn_num(hdev, SCO_LINK))
5089                 return;
5090 
5091         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5092                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5093                         BT_DBG("skb %p len %d", skb, skb->len);
5094                         hci_send_frame(hdev, skb);
5095 
5096                         conn->sent++;
5097                         if (conn->sent == ~0)
5098                                 conn->sent = 0;
5099                 }
5100         }
5101 }
5102 
5103 static void hci_sched_esco(struct hci_dev *hdev)
5104 {
5105         struct hci_conn *conn;
5106         struct sk_buff *skb;
5107         int quote;
5108 
5109         BT_DBG("%s", hdev->name);
5110 
5111         if (!hci_conn_num(hdev, ESCO_LINK))
5112                 return;
5113 
5114         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5115                                                      &quote))) {
5116                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5117                         BT_DBG("skb %p len %d", skb, skb->len);
5118                         hci_send_frame(hdev, skb);
5119 
5120                         conn->sent++;
5121                         if (conn->sent == ~0)
5122                                 conn->sent = 0;
5123                 }
5124         }
5125 }
5126 
5127 static void hci_sched_le(struct hci_dev *hdev)
5128 {
5129         struct hci_chan *chan;
5130         struct sk_buff *skb;
5131         int quote, cnt, tmp;
5132 
5133         BT_DBG("%s", hdev->name);
5134 
5135         if (!hci_conn_num(hdev, LE_LINK))
5136                 return;
5137 
5138         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5139                 /* LE tx timeout must be longer than maximum
5140                  * link supervision timeout (40.9 seconds) */
5141                 if (!hdev->le_cnt && hdev->le_pkts &&
5142                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5143                         hci_link_tx_to(hdev, LE_LINK);
5144         }
5145 
5146         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5147         tmp = cnt;
5148         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5149                 u32 priority = (skb_peek(&chan->data_q))->priority;
5150                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5151                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5152                                skb->len, skb->priority);
5153 
5154                         /* Stop if priority has changed */
5155                         if (skb->priority < priority)
5156                                 break;
5157 
5158                         skb = skb_dequeue(&chan->data_q);
5159 
5160                         hci_send_frame(hdev, skb);
5161                         hdev->le_last_tx = jiffies;
5162 
5163                         cnt--;
5164                         chan->sent++;
5165                         chan->conn->sent++;
5166                 }
5167         }
5168 
5169         if (hdev->le_pkts)
5170                 hdev->le_cnt = cnt;
5171         else
5172                 hdev->acl_cnt = cnt;
5173 
5174         if (cnt != tmp)
5175                 hci_prio_recalculate(hdev, LE_LINK);
5176 }
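
/* [Editorial sketch, not part of hci_core.c] When the controller reports
 * no dedicated LE buffers (le_pkts == 0), LE traffic spends shared ACL
 * credits instead -- hence the write-back to either le_cnt or acl_cnt
 * above. The pool choice in isolation (model struct, not kernel API):
 */
struct credit_model {
	int le_pkts;	/* dedicated LE buffer count reported, 0 if none */
	int le_cnt;	/* free dedicated LE credits */
	int acl_cnt;	/* free shared ACL credits */
};

static int *le_credit_pool(struct credit_model *d)
{
	return d->le_pkts ? &d->le_cnt : &d->acl_cnt;
}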
5177 
5178 static void hci_tx_work(struct work_struct *work)
5179 {
5180         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5181         struct sk_buff *skb;
5182 
5183         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5184                hdev->sco_cnt, hdev->le_cnt);
5185 
5186         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5187                 /* Schedule queues and send stuff to HCI driver */
5188                 hci_sched_acl(hdev);
5189                 hci_sched_sco(hdev);
5190                 hci_sched_esco(hdev);
5191                 hci_sched_le(hdev);
5192         }
5193 
5194         /* Send next queued raw (unknown type) packet */
5195         while ((skb = skb_dequeue(&hdev->raw_q)))
5196                 hci_send_frame(hdev, skb);
5197 }
5198 
5199 /* ----- HCI RX task (incoming data processing) ----- */
5200 
5201 /* ACL data packet */
5202 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5203 {
5204         struct hci_acl_hdr *hdr = (void *) skb->data;
5205         struct hci_conn *conn;
5206         __u16 handle, flags;
5207 
5208         skb_pull(skb, HCI_ACL_HDR_SIZE);
5209 
5210         handle = __le16_to_cpu(hdr->handle);
5211         flags  = hci_flags(handle);
5212         handle = hci_handle(handle);
5213 
5214         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5215                handle, flags);
5216 
5217         hdev->stat.acl_rx++;
5218 
5219         hci_dev_lock(hdev);
5220         conn = hci_conn_hash_lookup_handle(hdev, handle);
5221         hci_dev_unlock(hdev);
5222 
5223         if (conn) {
5224                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5225 
5226                 /* Send to upper protocol */
5227                 l2cap_recv_acldata(conn, skb, flags);
5228                 return;
5229         } else {
5230                 BT_ERR("%s ACL packet for unknown connection handle %d",
5231                        hdev->name, handle);
5232         }
5233 
5234         kfree_skb(skb);
5235 }
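
/* [Editorial note, not part of hci_core.c] The 16-bit field at the start
 * of every ACL packet carries the 12-bit connection handle in its low
 * bits and the packet-boundary/broadcast flags in the top four; that is
 * all hci_handle() and hci_flags() unpack above:
 *
 *	hci_handle(h) = h & 0x0fff
 *	hci_flags(h)  = h >> 12
 *
 * e.g. a field of 0x2042 is handle 0x042 with flags 0x2 (ACL_START).
 */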
5236 
5237 /* SCO data packet */
5238 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5239 {
5240         struct hci_sco_hdr *hdr = (void *) skb->data;
5241         struct hci_conn *conn;
5242         __u16 handle;
5243 
5244         skb_pull(skb, HCI_SCO_HDR_SIZE);
5245 
5246         handle = __le16_to_cpu(hdr->handle);
5247 
5248         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5249 
5250         hdev->stat.sco_rx++;
5251 
5252         hci_dev_lock(hdev);
5253         conn = hci_conn_hash_lookup_handle(hdev, handle);
5254         hci_dev_unlock(hdev);
5255 
5256         if (conn) {
5257                 /* Send to upper protocol */
5258                 sco_recv_scodata(conn, skb);
5259                 return;
5260         } else {
5261                 BT_ERR("%s SCO packet for unknown connection handle %d",
5262                        hdev->name, handle);
5263         }
5264 
5265         kfree_skb(skb);
5266 }
5267 
5268 static bool hci_req_is_complete(struct hci_dev *hdev)
5269 {
5270         struct sk_buff *skb;
5271 
5272         skb = skb_peek(&hdev->cmd_q);
5273         if (!skb)
5274                 return true;
5275 
5276         return bt_cb(skb)->req.start;
5277 }
5278 
5279 static void hci_resend_last(struct hci_dev *hdev)
5280 {
5281         struct hci_command_hdr *sent;
5282         struct sk_buff *skb;
5283         u16 opcode;
5284 
5285         if (!hdev->sent_cmd)
5286                 return;
5287 
5288         sent = (void *) hdev->sent_cmd->data;
5289         opcode = __le16_to_cpu(sent->opcode);
5290         if (opcode == HCI_OP_RESET)
5291                 return;
5292 
5293         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5294         if (!skb)
5295                 return;
5296 
5297         skb_queue_head(&hdev->cmd_q, skb);
5298         queue_work(hdev->workqueue, &hdev->cmd_work);
5299 }
5300 
5301 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5302 {
5303         hci_req_complete_t req_complete = NULL;
5304         struct sk_buff *skb;
5305         unsigned long flags;
5306 
5307         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5308 
5309         /* If the completed command doesn't match the last one that was
5310          * sent we need to do special handling of it.
5311          */
5312         if (!hci_sent_cmd_data(hdev, opcode)) {
5313                 /* Some CSR-based controllers generate a spontaneous
5314                  * reset complete event during init and any pending
5315                  * command will never be completed. In such a case we
5316                  * need to resend whatever was the last sent
5317                  * command.
5318                  */
5319                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5320                         hci_resend_last(hdev);
5321 
5322                 return;
5323         }
5324 
5325         /* If the command succeeded and there are still more commands in
5326          * this request, the request is not yet complete.
5327          */
5328         if (!status && !hci_req_is_complete(hdev))
5329                 return;
5330 
5331         /* If this was the last command in a request the complete
5332          * callback would be found in hdev->sent_cmd instead of the
5333          * command queue (hdev->cmd_q).
5334          */
5335         if (hdev->sent_cmd) {
5336                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5337 
5338                 if (req_complete) {
5339                         /* We must set the complete callback to NULL to
5340                          * avoid calling the callback more than once if
5341                          * this function gets called again.
5342                          */
5343                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5344 
5345                         goto call_complete;
5346                 }
5347         }
5348 
5349         /* Remove all pending commands belonging to this request */
5350         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5351         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5352                 if (bt_cb(skb)->req.start) {
5353                         __skb_queue_head(&hdev->cmd_q, skb);
5354                         break;
5355                 }
5356 
5357                 req_complete = bt_cb(skb)->req.complete;
5358                 kfree_skb(skb);
5359         }
5360         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5361 
5362 call_complete:
5363         if (req_complete)
5364                 req_complete(hdev, status);
5365 }
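
/* [Editorial sketch, not part of hci_core.c] The dequeue loop above
 * drains exactly one request from cmd_q: commands are consumed until the
 * next skb marked req.start (the first command of the *following*
 * request), which is pushed back. The same walk over a plain array of
 * start markers:
 */
static int drain_one_request(const int *is_start, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (is_start[i])
			break;		/* first cmd of next request: stop */
	return i;			/* number of commands consumed */
}
/* is_start = {0, 0, 1, 0} -> consumes 2, leaves the next request intact */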
5366 
5367 static void hci_rx_work(struct work_struct *work)
5368 {
5369         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5370         struct sk_buff *skb;
5371 
5372         BT_DBG("%s", hdev->name);
5373 
5374         while ((skb = skb_dequeue(&hdev->rx_q))) {
5375                 /* Send copy to monitor */
5376                 hci_send_to_monitor(hdev, skb);
5377 
5378                 if (atomic_read(&hdev->promisc)) {
5379                         /* Send copy to the sockets */
5380                         hci_send_to_sock(hdev, skb);
5381                 }
5382 
5383                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5384                         kfree_skb(skb);
5385                         continue;
5386                 }
5387 
5388                 if (test_bit(HCI_INIT, &hdev->flags)) {
5389                         /* Don't process data packets in this state. */
5390                         switch (bt_cb(skb)->pkt_type) {
5391                         case HCI_ACLDATA_PKT:
5392                         case HCI_SCODATA_PKT:
5393                                 kfree_skb(skb);
5394                                 continue;
5395                         }
5396                 }
5397 
5398                 /* Process frame */
5399                 switch (bt_cb(skb)->pkt_type) {
5400                 case HCI_EVENT_PKT:
5401                         BT_DBG("%s Event packet", hdev->name);
5402                         hci_event_packet(hdev, skb);
5403                         break;
5404 
5405                 case HCI_ACLDATA_PKT:
5406                         BT_DBG("%s ACL data packet", hdev->name);
5407                         hci_acldata_packet(hdev, skb);
5408                         break;
5409 
5410                 case HCI_SCODATA_PKT:
5411                         BT_DBG("%s SCO data packet", hdev->name);
5412                         hci_scodata_packet(hdev, skb);
5413                         break;
5414 
5415                 default:
5416                         kfree_skb(skb);
5417                         break;
5418                 }
5419         }
5420 }
5421 
5422 static void hci_cmd_work(struct work_struct *work)
5423 {
5424         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5425         struct sk_buff *skb;
5426 
5427         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5428                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5429 
5430         /* Send queued commands */
5431         if (atomic_read(&hdev->cmd_cnt)) {
5432                 skb = skb_dequeue(&hdev->cmd_q);
5433                 if (!skb)
5434                         return;
5435 
5436                 kfree_skb(hdev->sent_cmd);
5437 
5438                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5439                 if (hdev->sent_cmd) {
5440                         atomic_dec(&hdev->cmd_cnt);
5441                         hci_send_frame(hdev, skb);
5442                         if (test_bit(HCI_RESET, &hdev->flags))
5443                                 cancel_delayed_work(&hdev->cmd_timer);
5444                         else
5445                                 schedule_delayed_work(&hdev->cmd_timer,
5446                                                       HCI_CMD_TIMEOUT);
5447                 } else {
5448                         skb_queue_head(&hdev->cmd_q, skb);
5449                         queue_work(hdev->workqueue, &hdev->cmd_work);
5450                 }
5451         }
5452 }
5453 
5454 void hci_req_add_le_scan_disable(struct hci_request *req)
5455 {
5456         struct hci_cp_le_set_scan_enable cp;
5457 
5458         memset(&cp, 0, sizeof(cp));
5459         cp.enable = LE_SCAN_DISABLE;
5460         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5461 }
5462 
5463 static void add_to_white_list(struct hci_request *req,
5464                               struct hci_conn_params *params)
5465 {
5466         struct hci_cp_le_add_to_white_list cp;
5467 
5468         cp.bdaddr_type = params->addr_type;
5469         bacpy(&cp.bdaddr, &params->addr);
5470 
5471         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5472 }
5473 
5474 static u8 update_white_list(struct hci_request *req)
5475 {
5476         struct hci_dev *hdev = req->hdev;
5477         struct hci_conn_params *params;
5478         struct bdaddr_list *b;
5479         uint8_t white_list_entries = 0;
5480 
5481         /* Go through the current white list programmed into the
5482          * controller one by one and check if that address is still
5483          * in the list of pending connections or list of devices to
5484          * report. If not present in either list, then queue the
5485          * command to remove it from the controller.
5486          */
5487         list_for_each_entry(b, &hdev->le_white_list, list) {
5488                 struct hci_cp_le_del_from_white_list cp;
5489 
5490                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5491                                               &b->bdaddr, b->bdaddr_type) ||
5492                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5493                                               &b->bdaddr, b->bdaddr_type)) {
5494                         white_list_entries++;
5495                         continue;
5496                 }
5497 
5498                 cp.bdaddr_type = b->bdaddr_type;
5499                 bacpy(&cp.bdaddr, &b->bdaddr);
5500 
5501                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5502                             sizeof(cp), &cp);
5503         }
5504 
5505         /* Now that all stale white list entries have been
5506          * removed, walk through the list of pending connections
5507          * and ensure that any new device gets programmed into
5508          * the controller.
5509          *
5510          * If the list of devices is larger than the number of
5511          * available white list entries in the controller, then
5512          * just abort and return a filter policy value that does
5513          * not use the white list.
5514          */
5515         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5516                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5517                                            &params->addr, params->addr_type))
5518                         continue;
5519 
5520                 if (white_list_entries >= hdev->le_white_list_size) {
5521                         /* Select filter policy to accept all advertising */
5522                         return 0x00;
5523                 }
5524 
5525                 if (hci_find_irk_by_addr(hdev, &params->addr,
5526                                          params->addr_type)) {
5527                         /* White list cannot be used with RPAs */
5528                         return 0x00;
5529                 }
5530 
5531                 white_list_entries++;
5532                 add_to_white_list(req, params);
5533         }
5534 
5535         /* After adding all new pending connections, walk through
5536          * the list of pending reports and also add these to the
5537          * white list if there is still space.
5538          */
5539         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5540                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5541                                            &params->addr, params->addr_type))
5542                         continue;
5543 
5544                 if (white_list_entries >= hdev->le_white_list_size) {
5545                         /* Select filter policy to accept all advertising */
5546                         return 0x00;
5547                 }
5548 
5549                 if (hci_find_irk_by_addr(hdev, &params->addr,
5550                                          params->addr_type)) {
5551                         /* White list cannot be used with RPAs */
5552                         return 0x00;
5553                 }
5554 
5555                 white_list_entries++;
5556                 add_to_white_list(req, params);
5557         }
5558 
5559         /* Select filter policy to use white list */
5560         return 0x01;
5561 }
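
/* [Editorial sketch, not part of hci_core.c] All three walks above reduce
 * to one decision: the controller's white list is only usable if every
 * pending device fits and none of them needs a resolvable private
 * address (RPAs cannot be matched by the white list):
 */
static unsigned char model_filter_policy(int pending, int wl_size,
					 int any_rpa)
{
	if (pending > wl_size || any_rpa)
		return 0x00;	/* accept all advertising, filter in host */
	return 0x01;		/* let the controller filter by white list */
}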
5562 
5563 void hci_req_add_le_passive_scan(struct hci_request *req)
5564 {
5565         struct hci_cp_le_set_scan_param param_cp;
5566         struct hci_cp_le_set_scan_enable enable_cp;
5567         struct hci_dev *hdev = req->hdev;
5568         u8 own_addr_type;
5569         u8 filter_policy;
5570 
5571         /* Set require_privacy to false since no SCAN_REQ is sent
5572          * during passive scanning. Not using an unresolvable address
5573          * here is important so that peer devices using direct
5574          * advertising with our address will be correctly reported
5575          * by the controller.
5576          */
5577         if (hci_update_random_address(req, false, &own_addr_type))
5578                 return;
5579 
5580         /* Adding or removing entries from the white list must
5581          * happen before enabling scanning. The controller does
5582          * not allow white list modification while scanning.
5583          */
5584         filter_policy = update_white_list(req);
5585 
5586         memset(&param_cp, 0, sizeof(param_cp));
5587         param_cp.type = LE_SCAN_PASSIVE;
5588         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5589         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5590         param_cp.own_address_type = own_addr_type;
5591         param_cp.filter_policy = filter_policy;
5592         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5593                     &param_cp);
5594 
5595         memset(&enable_cp, 0, sizeof(enable_cp));
5596         enable_cp.enable = LE_SCAN_ENABLE;
5597         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5598         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5599                     &enable_cp);
5600 }
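
/* [Editorial note, not part of hci_core.c] le_scan_interval and
 * le_scan_window are handed to the controller in 0.625 ms units.
 * Assuming the common defaults of 0x0060 and 0x0030:
 *
 *	interval = 0x0060 * 0.625 ms = 60 ms
 *	window   = 0x0030 * 0.625 ms = 30 ms
 *
 * i.e. the controller listens 30 ms out of every 60 ms.
 */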
5601 
5602 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5603 {
5604         if (status)
5605                 BT_DBG("HCI request failed to update background scanning: "
5606                        "status 0x%2.2x", status);
5607 }
5608 
5609 /* This function controls the background scanning based on hdev->pend_le_conns
5610  * list. If there are pending LE connections we start the background
5611  * scanning, otherwise we stop it.
5612  *
5613  * This function requires that the caller holds hdev->lock.
5614  */
5615 void hci_update_background_scan(struct hci_dev *hdev)
5616 {
5617         struct hci_request req;
5618         struct hci_conn *conn;
5619         int err;
5620 
5621         if (!test_bit(HCI_UP, &hdev->flags) ||
5622             test_bit(HCI_INIT, &hdev->flags) ||
5623             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5624             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5625             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5626             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5627                 return;
5628 
5629         /* No point in doing scanning if LE support hasn't been enabled */
5630         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5631                 return;
5632 
5633         /* If discovery is active don't interfere with it */
5634         if (hdev->discovery.state != DISCOVERY_STOPPED)
5635                 return;
5636 
5637         hci_req_init(&req, hdev);
5638 
5639         if (list_empty(&hdev->pend_le_conns) &&
5640             list_empty(&hdev->pend_le_reports)) {
5641                 /* If there are no pending LE connections or devices
5642                  * to be scanned for, we should stop the background
5643                  * scanning.
5644                  */
5645 
5646                 /* If controller is not scanning we are done. */
5647                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5648                         return;
5649 
5650                 hci_req_add_le_scan_disable(&req);
5651 
5652                 BT_DBG("%s stopping background scanning", hdev->name);
5653         } else {
5654                 /* If there is at least one pending LE connection, we should
5655                  * keep the background scan running.
5656                  */
5657 
5658                 /* If controller is connecting, we should not start scanning
5659                  * since some controllers are not able to scan and connect at
5660                  * the same time.
5661                  */
5662                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5663                 if (conn)
5664                         return;
5665 
5666                 /* If controller is currently scanning, we stop it to ensure we
5667                  * don't miss any advertising (due to duplicates filter).
5668                  */
5669                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5670                         hci_req_add_le_scan_disable(&req);
5671 
5672                 hci_req_add_le_passive_scan(&req);
5673 
5674                 BT_DBG("%s starting background scanning", hdev->name);
5675         }
5676 
5677         err = hci_req_run(&req, update_background_scan_complete);
5678         if (err)
5679                 BT_ERR("Failed to run HCI request: err %d", err);
5680 }
5681 
5682 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5683 {
5684         struct bdaddr_list *b;
5685 
5686         list_for_each_entry(b, &hdev->whitelist, list) {
5687                 struct hci_conn *conn;
5688 
5689                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5690                 if (!conn)
5691                         return true;
5692 
5693                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5694                         return true;
5695         }
5696 
5697         return false;
5698 }
5699 
5700 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5701 {
5702         u8 scan;
5703 
5704         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5705                 return;
5706 
5707         if (!hdev_is_powered(hdev))
5708                 return;
5709 
5710         if (mgmt_powering_down(hdev))
5711                 return;
5712 
5713         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5714             disconnected_whitelist_entries(hdev))
5715                 scan = SCAN_PAGE;
5716         else
5717                 scan = SCAN_DISABLED;
5718 
5719         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5720                 return;
5721 
5722         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5723                 scan |= SCAN_INQUIRY;
5724 
5725         if (req)
5726                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5727         else
5728                 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5729 }
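
/* [Editorial note, not part of hci_core.c] The byte written with
 * Write Scan Enable is a two-bit mask, which is why SCAN_INQUIRY can be
 * OR'ed on top of SCAN_PAGE above:
 *
 *	SCAN_DISABLED = 0x00
 *	SCAN_INQUIRY  = 0x01	(inquiry scan: discoverable)
 *	SCAN_PAGE     = 0x02	(page scan: connectable)
 *
 * connectable and discoverable together is therefore 0x03.
 */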
5730 
