TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c


  1 /*
  2    BlueZ - Bluetooth protocol stack for Linux
  3    Copyright (C) 2000-2001 Qualcomm Incorporated
  4    Copyright (C) 2011 ProFUSION Embedded Systems
  5 
  6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
  7 
  8    This program is free software; you can redistribute it and/or modify
  9    it under the terms of the GNU General Public License version 2 as
 10    published by the Free Software Foundation;
 11 
 12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
 16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
 17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 20 
 21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
 22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
 23    SOFTWARE IS DISCLAIMED.
 24 */
 25 
 26 /* Bluetooth HCI core. */
 27 
 28 #include <linux/export.h>
 29 #include <linux/rfkill.h>
 30 #include <linux/debugfs.h>
 31 #include <linux/crypto.h>
 32 #include <linux/property.h>
 33 #include <linux/suspend.h>
 34 #include <linux/wait.h>
 35 #include <asm/unaligned.h>
 36 
 37 #include <net/bluetooth/bluetooth.h>
 38 #include <net/bluetooth/hci_core.h>
 39 #include <net/bluetooth/l2cap.h>
 40 #include <net/bluetooth/mgmt.h>
 41 
 42 #include "hci_request.h"
 43 #include "hci_debugfs.h"
 44 #include "smp.h"
 45 #include "leds.h"
 46 #include "msft.h"
 47 #include "aosp.h"
 48 
 49 static void hci_rx_work(struct work_struct *work);
 50 static void hci_cmd_work(struct work_struct *work);
 51 static void hci_tx_work(struct work_struct *work);
 52 
 53 /* HCI device list */
 54 LIST_HEAD(hci_dev_list);
 55 DEFINE_RWLOCK(hci_dev_list_lock);
 56 
 57 /* HCI callback list */
 58 LIST_HEAD(hci_cb_list);
 59 DEFINE_MUTEX(hci_cb_list_lock);
 60 
 61 /* HCI ID Numbering */
 62 static DEFINE_IDA(hci_index_ida);
 63 
 64 /* ---- HCI debugfs entries ---- */
 65 
 66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
 67                              size_t count, loff_t *ppos)
 68 {
 69         struct hci_dev *hdev = file->private_data;
 70         char buf[3];
 71 
 72         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
 73         buf[1] = '\n';
 74         buf[2] = '\0';
 75         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 76 }
 77 
 78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 79                               size_t count, loff_t *ppos)
 80 {
 81         struct hci_dev *hdev = file->private_data;
 82         struct sk_buff *skb;
 83         bool enable;
 84         int err;
 85 
 86         if (!test_bit(HCI_UP, &hdev->flags))
 87                 return -ENETDOWN;
 88 
 89         err = kstrtobool_from_user(user_buf, count, &enable);
 90         if (err)
 91                 return err;
 92 
 93         if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
 94                 return -EALREADY;
 95 
 96         hci_req_sync_lock(hdev);
 97         if (enable)
 98                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
 99                                      HCI_CMD_TIMEOUT);
100         else
101                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102                                      HCI_CMD_TIMEOUT);
103         hci_req_sync_unlock(hdev);
104 
105         if (IS_ERR(skb))
106                 return PTR_ERR(skb);
107 
108         kfree_skb(skb);
109 
110         hci_dev_change_flag(hdev, HCI_DUT_MODE);
111 
112         return count;
113 }
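/* Note: the Bluetooth specification defines no HCI opcode for leaving
 * Device Under Test mode; exiting is done by resetting the controller,
 * which is why the disable path above sends HCI_OP_RESET instead of a
 * dedicated "disable DUT mode" command.
 */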
114 
115 static const struct file_operations dut_mode_fops = {
116         .open           = simple_open,
117         .read           = dut_mode_read,
118         .write          = dut_mode_write,
119         .llseek         = default_llseek,
120 };
121 
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123                                 size_t count, loff_t *ppos)
124 {
125         struct hci_dev *hdev = file->private_data;
126         char buf[3];
127 
128         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129         buf[1] = '\n';
130         buf[2] = '\0';
131         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133 
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135                                  size_t count, loff_t *ppos)
136 {
137         struct hci_dev *hdev = file->private_data;
138         bool enable;
139         int err;
140 
141         err = kstrtobool_from_user(user_buf, count, &enable);
142         if (err)
143                 return err;
144 
145         /* When the diagnostic flags are not persistent and the transport
146          * is not active or in user channel operation, there is no need
147          * for the vendor callback. Instead just store the desired value;
148          * the setting will be programmed when the controller is powered on.
149          */
150         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151             (!test_bit(HCI_RUNNING, &hdev->flags) ||
152              hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
153                 goto done;
154 
155         hci_req_sync_lock(hdev);
156         err = hdev->set_diag(hdev, enable);
157         hci_req_sync_unlock(hdev);
158 
159         if (err < 0)
160                 return err;
161 
162 done:
163         if (enable)
164                 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165         else
166                 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
167 
168         return count;
169 }
170 
171 static const struct file_operations vendor_diag_fops = {
172         .open           = simple_open,
173         .read           = vendor_diag_read,
174         .write          = vendor_diag_write,
175         .llseek         = default_llseek,
176 };
177 
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
179 {
180         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
181                             &dut_mode_fops);
182 
183         if (hdev->set_diag)
184                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
185                                     &vendor_diag_fops);
186 }
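/* Assuming debugfs is mounted at /sys/kernel/debug, the entries created
 * above appear as /sys/kernel/debug/bluetooth/hciX/dut_mode and, when the
 * driver provides a set_diag callback, .../vendor_diag. A minimal usage
 * sketch from a root shell (hci0 is a hypothetical device name):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * kstrtobool_from_user() accepts y/Y/1/on for true and n/N/0/off for
 * false, so any of those spellings work for the write side.
 */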
187 
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
189 {
190         BT_DBG("%s %ld", req->hdev->name, opt);
191 
192         /* Reset device */
193         set_bit(HCI_RESET, &req->hdev->flags);
194         hci_req_add(req, HCI_OP_RESET, 0, NULL);
195         return 0;
196 }
197 
198 static void bredr_init(struct hci_request *req)
199 {
200         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201 
202         /* Read Local Supported Features */
203         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
204 
205         /* Read Local Version */
206         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
207 
208         /* Read BD Address */
209         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
210 }
211 
212 static void amp_init1(struct hci_request *req)
213 {
214         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
215 
216         /* Read Local Version */
217         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
218 
219         /* Read Local Supported Commands */
220         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221 
222         /* Read Local AMP Info */
223         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
224 
225         /* Read Data Blk size */
226         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
227 
228         /* Read Flow Control Mode */
229         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230 
231         /* Read Location Data */
232         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
233 }
234 
235 static int amp_init2(struct hci_request *req)
236 {
237         /* Read Local Supported Features. Not all AMP controllers
238          * support this, so it's placed conditionally in the second
239          * stage init.
240          */
241         if (req->hdev->commands[14] & 0x20)
242                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
243 
244         return 0;
245 }
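/* Throughout this file, hdev->commands[] holds the Supported Commands
 * bitmask returned by Read Local Supported Commands: commands[n] & (1 << b)
 * tests bit b of octet n of that mask. For example, the check above,
 * commands[14] & 0x20 (octet 14, bit 5), gates the Read Local Supported
 * Features command that it issues.
 */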
246 
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
248 {
249         struct hci_dev *hdev = req->hdev;
250 
251         BT_DBG("%s %ld", hdev->name, opt);
252 
253         /* Reset */
254         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255                 hci_reset_req(req, 0);
256 
257         switch (hdev->dev_type) {
258         case HCI_PRIMARY:
259                 bredr_init(req);
260                 break;
261         case HCI_AMP:
262                 amp_init1(req);
263                 break;
264         default:
265                 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
266                 break;
267         }
268 
269         return 0;
270 }
271 
272 static void bredr_setup(struct hci_request *req)
273 {
274         __le16 param;
275         __u8 flt_type;
276 
277         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
279 
280         /* Read Class of Device */
281         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
282 
283         /* Read Local Name */
284         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
285 
286         /* Read Voice Setting */
287         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
288 
289         /* Read Number of Supported IAC */
290         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291 
292         /* Read Current IAC LAP */
293         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294 
295         /* Clear Event Filters */
296         flt_type = HCI_FLT_CLEAR_ALL;
297         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
298 
299         /* Connection accept timeout ~20 secs */
300         param = cpu_to_le16(0x7d00);
301         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
302 }
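/* The connection accept timeout is measured in baseband slots of 0.625 ms,
 * so 0x7d00 (32000) slots * 0.625 ms = 20 s, matching the comment above.
 */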
303 
304 static void le_setup(struct hci_request *req)
305 {
306         struct hci_dev *hdev = req->hdev;
307 
308         /* Read LE Buffer Size */
309         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
310 
311         /* Read LE Local Supported Features */
312         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
313 
314         /* Read LE Supported States */
315         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316 
317         /* LE-only controllers have LE implicitly enabled */
318         if (!lmp_bredr_capable(hdev))
319                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
320 }
321 
322 static void hci_setup_event_mask(struct hci_request *req)
323 {
324         struct hci_dev *hdev = req->hdev;
325 
326         /* The second byte is 0xff instead of 0x9f (two reserved bits
327          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
328          * command otherwise.
329          */
330         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331 
332         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
333          * any event mask for pre-1.2 devices.
334          */
335         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
336                 return;
337 
338         if (lmp_bredr_capable(hdev)) {
339                 events[4] |= 0x01; /* Flow Specification Complete */
340         } else {
341                 /* Use a different default for LE-only devices */
342                 memset(events, 0, sizeof(events));
343                 events[1] |= 0x20; /* Command Complete */
344                 events[1] |= 0x40; /* Command Status */
345                 events[1] |= 0x80; /* Hardware Error */
346 
347                 /* If the controller supports the Disconnect command, enable
348                  * the corresponding event. In addition enable packet flow
349                  * control related events.
350                  */
351                 if (hdev->commands[0] & 0x20) {
352                         events[0] |= 0x10; /* Disconnection Complete */
353                         events[2] |= 0x04; /* Number of Completed Packets */
354                         events[3] |= 0x02; /* Data Buffer Overflow */
355                 }
356 
357                 /* If the controller supports the Read Remote Version
358                  * Information command, enable the corresponding event.
359                  */
360                 if (hdev->commands[2] & 0x80)
361                         events[1] |= 0x08; /* Read Remote Version Information
362                                             * Complete
363                                             */
364 
365                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366                         events[0] |= 0x80; /* Encryption Change */
367                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
368                 }
369         }
370 
371         if (lmp_inq_rssi_capable(hdev) ||
372             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373                 events[4] |= 0x02; /* Inquiry Result with RSSI */
374 
375         if (lmp_ext_feat_capable(hdev))
376                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
377 
378         if (lmp_esco_capable(hdev)) {
379                 events[5] |= 0x08; /* Synchronous Connection Complete */
380                 events[5] |= 0x10; /* Synchronous Connection Changed */
381         }
382 
383         if (lmp_sniffsubr_capable(hdev))
384                 events[5] |= 0x20; /* Sniff Subrating */
385 
386         if (lmp_pause_enc_capable(hdev))
387                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
388 
389         if (lmp_ext_inq_capable(hdev))
390                 events[5] |= 0x40; /* Extended Inquiry Result */
391 
392         if (lmp_no_flush_capable(hdev))
393                 events[7] |= 0x01; /* Enhanced Flush Complete */
394 
395         if (lmp_lsto_capable(hdev))
396                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
397 
398         if (lmp_ssp_capable(hdev)) {
399                 events[6] |= 0x01;      /* IO Capability Request */
400                 events[6] |= 0x02;      /* IO Capability Response */
401                 events[6] |= 0x04;      /* User Confirmation Request */
402                 events[6] |= 0x08;      /* User Passkey Request */
403                 events[6] |= 0x10;      /* Remote OOB Data Request */
404                 events[6] |= 0x20;      /* Simple Pairing Complete */
405                 events[7] |= 0x04;      /* User Passkey Notification */
406                 events[7] |= 0x08;      /* Keypress Notification */
407                 events[7] |= 0x10;      /* Remote Host Supported
408                                          * Features Notification
409                                          */
410         }
411 
412         if (lmp_le_capable(hdev))
413                 events[7] |= 0x20;      /* LE Meta-Event */
414 
415         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
416 }
417 
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
419 {
420         struct hci_dev *hdev = req->hdev;
421 
422         if (hdev->dev_type == HCI_AMP)
423                 return amp_init2(req);
424 
425         if (lmp_bredr_capable(hdev))
426                 bredr_setup(req);
427         else
428                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
429 
430         if (lmp_le_capable(hdev))
431                 le_setup(req);
432 
433         /* All Bluetooth 1.2 and later controllers should support the
434          * HCI command for reading the local supported commands.
435          *
436          * Unfortunately some controllers indicate Bluetooth 1.2 support,
437          * but do not have support for this command. If that is the case,
438          * the driver can quirk the behavior and skip reading the local
439          * supported commands.
440          */
441         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
444 
445         if (lmp_ssp_capable(hdev)) {
446                 /* When SSP is available, the host features page
447                  * should also be available. However some
448                  * controllers list the max_page as 0 as long as SSP
449                  * has not been enabled. To achieve proper debugging
450                  * output, force max_page to at least 1.
451                  */
452                 hdev->max_page = 0x01;
453 
454                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
455                         u8 mode = 0x01;
456 
457                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458                                     sizeof(mode), &mode);
459                 } else {
460                         struct hci_cp_write_eir cp;
461 
462                         memset(hdev->eir, 0, sizeof(hdev->eir));
463                         memset(&cp, 0, sizeof(cp));
464 
465                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
466                 }
467         }
468 
469         if (lmp_inq_rssi_capable(hdev) ||
470             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
471                 u8 mode;
472 
473                 /* If Extended Inquiry Result events are supported, then
474                  * they are clearly preferred over Inquiry Result with RSSI
475                  * events.
476                  */
477                 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478 
479                 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
480         }
481 
482         if (lmp_inq_tx_pwr_capable(hdev))
483                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
484 
485         if (lmp_ext_feat_capable(hdev)) {
486                 struct hci_cp_read_local_ext_features cp;
487 
488                 cp.page = 0x01;
489                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
490                             sizeof(cp), &cp);
491         }
492 
493         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
494                 u8 enable = 1;
495                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
496                             &enable);
497         }
498 
499         return 0;
500 }
501 
502 static void hci_setup_link_policy(struct hci_request *req)
503 {
504         struct hci_dev *hdev = req->hdev;
505         struct hci_cp_write_def_link_policy cp;
506         u16 link_policy = 0;
507 
508         if (lmp_rswitch_capable(hdev))
509                 link_policy |= HCI_LP_RSWITCH;
510         if (lmp_hold_capable(hdev))
511                 link_policy |= HCI_LP_HOLD;
512         if (lmp_sniff_capable(hdev))
513                 link_policy |= HCI_LP_SNIFF;
514         if (lmp_park_capable(hdev))
515                 link_policy |= HCI_LP_PARK;
516 
517         cp.policy = cpu_to_le16(link_policy);
518         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
519 }
520 
521 static void hci_set_le_support(struct hci_request *req)
522 {
523         struct hci_dev *hdev = req->hdev;
524         struct hci_cp_write_le_host_supported cp;
525 
526         /* LE-only devices do not support explicit enablement */
527         if (!lmp_bredr_capable(hdev))
528                 return;
529 
530         memset(&cp, 0, sizeof(cp));
531 
532         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
533                 cp.le = 0x01;
534                 cp.simul = 0x00;
535         }
536 
537         if (cp.le != lmp_host_le_capable(hdev))
538                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
539                             &cp);
540 }
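/* The Write LE Host Supported command is only sent when the desired host
 * LE setting differs from what the controller currently reports through
 * lmp_host_le_capable(), which avoids a redundant command on controllers
 * that are already configured correctly.
 */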
541 
542 static void hci_set_event_mask_page_2(struct hci_request *req)
543 {
544         struct hci_dev *hdev = req->hdev;
545         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546         bool changed = false;
547 
548         /* If Connectionless Slave Broadcast master role is supported,
549          * enable all necessary events for it.
550          */
551         if (lmp_csb_master_capable(hdev)) {
552                 events[1] |= 0x40;      /* Triggered Clock Capture */
553                 events[1] |= 0x80;      /* Synchronization Train Complete */
554                 events[2] |= 0x10;      /* Slave Page Response Timeout */
555                 events[2] |= 0x20;      /* CSB Channel Map Change */
556                 changed = true;
557         }
558 
559         /* If Connectionless Slave Broadcast slave role is supported,
560          * enable all necessary events for it.
561          */
562         if (lmp_csb_slave_capable(hdev)) {
563                 events[2] |= 0x01;      /* Synchronization Train Received */
564                 events[2] |= 0x02;      /* CSB Receive */
565                 events[2] |= 0x04;      /* CSB Timeout */
566                 events[2] |= 0x08;      /* Truncated Page Complete */
567                 changed = true;
568         }
569 
570         /* Enable Authenticated Payload Timeout Expired event if supported */
571         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
572                 events[2] |= 0x80;
573                 changed = true;
574         }
575 
576         /* Some Broadcom-based controllers indicate support for Set Event
577          * Mask Page 2 command, but then actually do not support it. Since
578          * the default value is all bits set to zero, the command is only
579          * required if the event mask has to be changed. In case no change
580          * to the event mask is needed, skip this command.
581          */
582         if (changed)
583                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584                             sizeof(events), events);
585 }
586 
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
588 {
589         struct hci_dev *hdev = req->hdev;
590         u8 p;
591 
592         hci_setup_event_mask(req);
593 
594         if (hdev->commands[6] & 0x20 &&
595             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596                 struct hci_cp_read_stored_link_key cp;
597 
598                 bacpy(&cp.bdaddr, BDADDR_ANY);
599                 cp.read_all = 0x01;
600                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
601         }
602 
603         if (hdev->commands[5] & 0x10)
604                 hci_setup_link_policy(req);
605 
606         if (hdev->commands[8] & 0x01)
607                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608 
609         if (hdev->commands[18] & 0x04 &&
610             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
611                 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
612 
613         /* Some older Broadcom-based Bluetooth 1.2 controllers do not
614          * support the Read Page Scan Type command. Check support for
615          * this command in the bit mask of supported commands.
616          */
617         if (hdev->commands[13] & 0x01)
618                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
619 
620         if (lmp_le_capable(hdev)) {
621                 u8 events[8];
622 
623                 memset(events, 0, sizeof(events));
624 
625                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626                         events[0] |= 0x10;      /* LE Long Term Key Request */
627 
628                 /* If controller supports the Connection Parameters Request
629                  * Link Layer Procedure, enable the corresponding event.
630                  */
631                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632                         events[0] |= 0x20;      /* LE Remote Connection
633                                                  * Parameter Request
634                                                  */
635 
636                 /* If the controller supports the Data Length Extension
637                  * feature, enable the corresponding event.
638                  */
639                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640                         events[0] |= 0x40;      /* LE Data Length Change */
641 
642                 /* If the controller supports LL Privacy feature, enable
643                  * the corresponding event.
644                  */
645                 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646                         events[1] |= 0x02;      /* LE Enhanced Connection
647                                                  * Complete
648                                                  */
649 
650                 /* If the controller supports Extended Scanner Filter
651                  * Policies, enable the corresponding event.
652                  */
653                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654                         events[1] |= 0x04;      /* LE Direct Advertising
655                                                  * Report
656                                                  */
657 
658                 /* If the controller supports Channel Selection Algorithm #2
659                  * feature, enable the corresponding event.
660                  */
661                 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662                         events[2] |= 0x08;      /* LE Channel Selection
663                                                  * Algorithm
664                                                  */
665 
666                 /* If the controller supports the LE Set Scan Enable command,
667                  * enable the corresponding advertising report event.
668                  */
669                 if (hdev->commands[26] & 0x08)
670                         events[0] |= 0x02;      /* LE Advertising Report */
671 
672                 /* If the controller supports the LE Create Connection
673                  * command, enable the corresponding event.
674                  */
675                 if (hdev->commands[26] & 0x10)
676                         events[0] |= 0x01;      /* LE Connection Complete */
677 
678                 /* If the controller supports the LE Connection Update
679                  * command, enable the corresponding event.
680                  */
681                 if (hdev->commands[27] & 0x04)
682                         events[0] |= 0x04;      /* LE Connection Update
683                                                  * Complete
684                                                  */
685 
686                 /* If the controller supports the LE Read Remote Used Features
687                  * command, enable the corresponding event.
688                  */
689                 if (hdev->commands[27] & 0x20)
690                         events[0] |= 0x08;      /* LE Read Remote Used
691                                                  * Features Complete
692                                                  */
693 
694                 /* If the controller supports the LE Read Local P-256
695                  * Public Key command, enable the corresponding event.
696                  */
697                 if (hdev->commands[34] & 0x02)
698                         events[0] |= 0x80;      /* LE Read Local P-256
699                                                  * Public Key Complete
700                                                  */
701 
702                 /* If the controller supports the LE Generate DHKey
703                  * command, enable the corresponding event.
704                  */
705                 if (hdev->commands[34] & 0x04)
706                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
707 
708                 /* If the controller supports the LE Set Default PHY or
709                  * LE Set PHY commands, enable the corresponding event.
710                  */
711                 if (hdev->commands[35] & (0x20 | 0x40))
712                         events[1] |= 0x08;        /* LE PHY Update Complete */
713 
714                 /* If the controller supports LE Set Extended Scan Parameters
715                  * and LE Set Extended Scan Enable commands, enable the
716                  * corresponding event.
717                  */
718                 if (use_ext_scan(hdev))
719                         events[1] |= 0x10;      /* LE Extended Advertising
720                                                  * Report
721                                                  */
722 
723                 /* If the controller supports the LE Extended Advertising
724                  * command, enable the corresponding event.
725                  */
726                 if (ext_adv_capable(hdev))
727                         events[2] |= 0x02;      /* LE Advertising Set
728                                                  * Terminated
729                                                  */
730 
731                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
732                             events);
733 
734                 /* Read LE Advertising Channel TX Power */
735                 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736                         /* The HCI TS spec forbids mixing legacy and extended
737                          * advertising commands, and READ_ADV_TX_POWER is one
738                          * of the legacy ones. So do not call it if extended
739                          * advertising is supported, otherwise the controller
740                          * will return COMMAND_DISALLOWED for extended commands.
741                          */
742                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
743                 }
744 
745                 if (hdev->commands[38] & 0x80) {
746                         /* Read LE Min/Max Tx Power */
747                         hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
748                                     0, NULL);
749                 }
750 
751                 if (hdev->commands[26] & 0x40) {
752                         /* Read LE White List Size */
753                         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
754                                     0, NULL);
755                 }
756 
757                 if (hdev->commands[26] & 0x80) {
758                         /* Clear LE White List */
759                         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
760                 }
761 
762                 if (hdev->commands[34] & 0x40) {
763                         /* Read LE Resolving List Size */
764                         hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
765                                     0, NULL);
766                 }
767 
768                 if (hdev->commands[34] & 0x20) {
769                         /* Clear LE Resolving List */
770                         hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
771                 }
772 
773                 if (hdev->commands[35] & 0x04) {
774                         __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
775 
776                         /* Set RPA timeout */
777                         hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
778                                     &rpa_timeout);
779                 }
780 
781                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
782                         /* Read LE Maximum Data Length */
783                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
784 
785                         /* Read LE Suggested Default Data Length */
786                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
787                 }
788 
789                 if (ext_adv_capable(hdev)) {
790                         /* Read LE Number of Supported Advertising Sets */
791                         hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
792                                     0, NULL);
793                 }
794 
795                 hci_set_le_support(req);
796         }
797 
798         /* Read features beyond page 1 if available */
799         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
800                 struct hci_cp_read_local_ext_features cp;
801 
802                 cp.page = p;
803                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
804                             sizeof(cp), &cp);
805         }
806 
807         return 0;
808 }
809 
810 static int hci_init4_req(struct hci_request *req, unsigned long opt)
811 {
812         struct hci_dev *hdev = req->hdev;
813 
814         /* Some Broadcom-based Bluetooth controllers do not support the
815          * Delete Stored Link Key command. They are clearly indicating its
816          * absence in the bit mask of supported commands.
817          *
818          * Check the supported commands and only if the command is marked
819          * as supported send it. If not supported assume that the controller
820          * does not have actual support for stored link keys which makes this
821          * command redundant anyway.
822          *
823          * Some controllers indicate that they support deleting stored
824          * link keys, but they don't. The quirk lets a driver
825          * just disable this command.
826          */
827         if (hdev->commands[6] & 0x80 &&
828             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
829                 struct hci_cp_delete_stored_link_key cp;
830 
831                 bacpy(&cp.bdaddr, BDADDR_ANY);
832                 cp.delete_all = 0x01;
833                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
834                             sizeof(cp), &cp);
835         }
836 
837         /* Set event mask page 2 if the HCI command for it is supported */
838         if (hdev->commands[22] & 0x04)
839                 hci_set_event_mask_page_2(req);
840 
841         /* Read local codec list if the HCI command is supported */
842         if (hdev->commands[29] & 0x20)
843                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
844 
845         /* Read local pairing options if the HCI command is supported */
846         if (hdev->commands[41] & 0x08)
847                 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
848 
849         /* Get MWS transport configuration if the HCI command is supported */
850         if (hdev->commands[30] & 0x08)
851                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
852 
853         /* Check for Synchronization Train support */
854         if (lmp_sync_train_capable(hdev))
855                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
856 
857         /* Enable Secure Connections if supported and configured */
858         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
859             bredr_sc_enabled(hdev)) {
860                 u8 support = 0x01;
861 
862                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
863                             sizeof(support), &support);
864         }
865 
866         /* If supported, set erroneous data reporting to match the
867          * wideband speech setting.
868          */
869         if (hdev->commands[18] & 0x08 &&
870             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
871                 bool enabled = hci_dev_test_flag(hdev,
872                                                  HCI_WIDEBAND_SPEECH_ENABLED);
873 
874                 if (enabled !=
875                     (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
876                         struct hci_cp_write_def_err_data_reporting cp;
877 
878                         cp.err_data_reporting = enabled ?
879                                                 ERR_DATA_REPORTING_ENABLED :
880                                                 ERR_DATA_REPORTING_DISABLED;
881 
882                         hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
883                                     sizeof(cp), &cp);
884                 }
885         }
886 
887         /* Set Suggested Default Data Length to maximum if supported */
888         if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
889                 struct hci_cp_le_write_def_data_len cp;
890 
891                 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
892                 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
893                 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
894         }
895 
896         /* Set Default PHY parameters if command is supported */
897         if (hdev->commands[35] & 0x20) {
898                 struct hci_cp_le_set_default_phy cp;
899 
900                 cp.all_phys = 0x00;
901                 cp.tx_phys = hdev->le_tx_def_phys;
902                 cp.rx_phys = hdev->le_rx_def_phys;
903 
904                 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
905         }
906 
907         return 0;
908 }
909 
910 static int __hci_init(struct hci_dev *hdev)
911 {
912         int err;
913 
914         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
915         if (err < 0)
916                 return err;
917 
918         if (hci_dev_test_flag(hdev, HCI_SETUP))
919                 hci_debugfs_create_basic(hdev);
920 
921         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
922         if (err < 0)
923                 return err;
924 
925         /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
926          * dual-mode BR/EDR/LE controllers. AMP controllers only need
927          * the first two stages of init.
928          */
929         if (hdev->dev_type != HCI_PRIMARY)
930                 return 0;
931 
932         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
933         if (err < 0)
934                 return err;
935 
936         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
937         if (err < 0)
938                 return err;
939 
940         /* This function is only called when the controller is actually in
941          * configured state. When the controller is marked as unconfigured,
942          * this initialization procedure is not run.
943          *
944          * This means a controller may run through its setup phase and
945          * then discover missing settings. In that case, this function is
946          * not called during setup and will only be called later, during
947          * the config phase.
948          *
949          * So only when in setup phase or config phase, create the debugfs
950          * entries and register the SMP channels.
951          */
952         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
953             !hci_dev_test_flag(hdev, HCI_CONFIG))
954                 return 0;
955 
956         hci_debugfs_create_common(hdev);
957 
958         if (lmp_bredr_capable(hdev))
959                 hci_debugfs_create_bredr(hdev);
960 
961         if (lmp_le_capable(hdev))
962                 hci_debugfs_create_le(hdev);
963 
964         return 0;
965 }
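/* Summary of the staged bring-up driven by __hci_init():
 *   init1 - reset (unless quirked away) plus basic identity reads
 *   init2 - BR/EDR and/or LE transport setup, SSP and inquiry mode
 *   init3 - event masks, link policy and the LE feature/state reads
 *   init4 - optional extras such as stored link keys, codecs and
 *           Secure Connections
 * AMP controllers stop after init2, and the debugfs entries are created
 * only in the setup or config phase, as the comments above explain.
 */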
966 
967 static int hci_init0_req(struct hci_request *req, unsigned long opt)
968 {
969         struct hci_dev *hdev = req->hdev;
970 
971         BT_DBG("%s %ld", hdev->name, opt);
972 
973         /* Reset */
974         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
975                 hci_reset_req(req, 0);
976 
977         /* Read Local Version */
978         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
979 
980         /* Read BD Address */
981         if (hdev->set_bdaddr)
982                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
983 
984         return 0;
985 }
986 
987 static int __hci_unconf_init(struct hci_dev *hdev)
988 {
989         int err;
990 
991         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
992                 return 0;
993 
994         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
995         if (err < 0)
996                 return err;
997 
998         if (hci_dev_test_flag(hdev, HCI_SETUP))
999                 hci_debugfs_create_basic(hdev);
1000 
1001         return 0;
1002 }
1003 
1004 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1005 {
1006         __u8 scan = opt;
1007 
1008         BT_DBG("%s %x", req->hdev->name, scan);
1009 
1010         /* Inquiry and Page scans */
1011         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1012         return 0;
1013 }
1014 
1015 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1016 {
1017         __u8 auth = opt;
1018 
1019         BT_DBG("%s %x", req->hdev->name, auth);
1020 
1021         /* Authentication */
1022         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1023         return 0;
1024 }
1025 
1026 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1027 {
1028         __u8 encrypt = opt;
1029 
1030         BT_DBG("%s %x", req->hdev->name, encrypt);
1031 
1032         /* Encryption */
1033         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1034         return 0;
1035 }
1036 
1037 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1038 {
1039         __le16 policy = cpu_to_le16(opt);
1040 
1041         BT_DBG("%s %x", req->hdev->name, policy);
1042 
1043         /* Default link policy */
1044         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1045         return 0;
1046 }
1047 
1048 /* Get HCI device by index.
1049  * Device is held on return. */
1050 struct hci_dev *hci_dev_get(int index)
1051 {
1052         struct hci_dev *hdev = NULL, *d;
1053 
1054         BT_DBG("%d", index);
1055 
1056         if (index < 0)
1057                 return NULL;
1058 
1059         read_lock(&hci_dev_list_lock);
1060         list_for_each_entry(d, &hci_dev_list, list) {
1061                 if (d->id == index) {
1062                         hdev = hci_dev_hold(d);
1063                         break;
1064                 }
1065         }
1066         read_unlock(&hci_dev_list_lock);
1067         return hdev;
1068 }
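/* The reference taken via hci_dev_hold() above must be released with
 * hci_dev_put() once the caller is done with the device, as hci_inquiry()
 * below does on its done: path.
 */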
1069 
1070 /* ---- Inquiry support ---- */
1071 
1072 bool hci_discovery_active(struct hci_dev *hdev)
1073 {
1074         struct discovery_state *discov = &hdev->discovery;
1075 
1076         switch (discov->state) {
1077         case DISCOVERY_FINDING:
1078         case DISCOVERY_RESOLVING:
1079                 return true;
1080 
1081         default:
1082                 return false;
1083         }
1084 }
1085 
1086 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1087 {
1088         int old_state = hdev->discovery.state;
1089 
1090         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1091 
1092         if (old_state == state)
1093                 return;
1094 
1095         hdev->discovery.state = state;
1096 
1097         switch (state) {
1098         case DISCOVERY_STOPPED:
1099                 hci_update_background_scan(hdev);
1100 
1101                 if (old_state != DISCOVERY_STARTING)
1102                         mgmt_discovering(hdev, 0);
1103                 break;
1104         case DISCOVERY_STARTING:
1105                 break;
1106         case DISCOVERY_FINDING:
1107                 mgmt_discovering(hdev, 1);
1108                 break;
1109         case DISCOVERY_RESOLVING:
1110                 break;
1111         case DISCOVERY_STOPPING:
1112                 break;
1113         }
1114 }
1115 
1116 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1117 {
1118         struct discovery_state *cache = &hdev->discovery;
1119         struct inquiry_entry *p, *n;
1120 
1121         list_for_each_entry_safe(p, n, &cache->all, all) {
1122                 list_del(&p->all);
1123                 kfree(p);
1124         }
1125 
1126         INIT_LIST_HEAD(&cache->unknown);
1127         INIT_LIST_HEAD(&cache->resolve);
1128 }
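/* Every inquiry_entry is linked on cache->all and may additionally sit on
 * cache->unknown or cache->resolve through its ->list node. Freeing the
 * entries via ->all therefore covers all of them, and the other two list
 * heads only need to be re-initialized.
 */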
1129 
1130 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1131                                                bdaddr_t *bdaddr)
1132 {
1133         struct discovery_state *cache = &hdev->discovery;
1134         struct inquiry_entry *e;
1135 
1136         BT_DBG("cache %p, %pMR", cache, bdaddr);
1137 
1138         list_for_each_entry(e, &cache->all, all) {
1139                 if (!bacmp(&e->data.bdaddr, bdaddr))
1140                         return e;
1141         }
1142 
1143         return NULL;
1144 }
1145 
1146 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1147                                                        bdaddr_t *bdaddr)
1148 {
1149         struct discovery_state *cache = &hdev->discovery;
1150         struct inquiry_entry *e;
1151 
1152         BT_DBG("cache %p, %pMR", cache, bdaddr);
1153 
1154         list_for_each_entry(e, &cache->unknown, list) {
1155                 if (!bacmp(&e->data.bdaddr, bdaddr))
1156                         return e;
1157         }
1158 
1159         return NULL;
1160 }
1161 
1162 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1163                                                        bdaddr_t *bdaddr,
1164                                                        int state)
1165 {
1166         struct discovery_state *cache = &hdev->discovery;
1167         struct inquiry_entry *e;
1168 
1169         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1170 
1171         list_for_each_entry(e, &cache->resolve, list) {
1172                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1173                         return e;
1174                 if (!bacmp(&e->data.bdaddr, bdaddr))
1175                         return e;
1176         }
1177 
1178         return NULL;
1179 }
1180 
1181 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1182                                       struct inquiry_entry *ie)
1183 {
1184         struct discovery_state *cache = &hdev->discovery;
1185         struct list_head *pos = &cache->resolve;
1186         struct inquiry_entry *p;
1187 
1188         list_del(&ie->list);
1189 
1190         list_for_each_entry(p, &cache->resolve, list) {
1191                 if (p->name_state != NAME_PENDING &&
1192                     abs(p->data.rssi) >= abs(ie->data.rssi))
1193                         break;
1194                 pos = &p->list;
1195         }
1196 
1197         list_add(&ie->list, pos);
1198 }
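/* The loop above keeps cache->resolve ordered by signal strength: entries
 * whose names are already being resolved (NAME_PENDING) are skipped, and
 * the re-inserted entry lands before the first entry with an equal or
 * weaker signal (larger |RSSI|), so the strongest devices have their names
 * resolved first.
 */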
1199 
1200 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1201                              bool name_known)
1202 {
1203         struct discovery_state *cache = &hdev->discovery;
1204         struct inquiry_entry *ie;
1205         u32 flags = 0;
1206 
1207         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1208 
1209         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1210 
1211         if (!data->ssp_mode)
1212                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1213 
1214         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1215         if (ie) {
1216                 if (!ie->data.ssp_mode)
1217                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1218 
1219                 if (ie->name_state == NAME_NEEDED &&
1220                     data->rssi != ie->data.rssi) {
1221                         ie->data.rssi = data->rssi;
1222                         hci_inquiry_cache_update_resolve(hdev, ie);
1223                 }
1224 
1225                 goto update;
1226         }
1227 
1228         /* Entry not in the cache. Add new one. */
1229         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1230         if (!ie) {
1231                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1232                 goto done;
1233         }
1234 
1235         list_add(&ie->all, &cache->all);
1236 
1237         if (name_known) {
1238                 ie->name_state = NAME_KNOWN;
1239         } else {
1240                 ie->name_state = NAME_NOT_KNOWN;
1241                 list_add(&ie->list, &cache->unknown);
1242         }
1243 
1244 update:
1245         if (name_known && ie->name_state != NAME_KNOWN &&
1246             ie->name_state != NAME_PENDING) {
1247                 ie->name_state = NAME_KNOWN;
1248                 list_del(&ie->list);
1249         }
1250 
1251         memcpy(&ie->data, data, sizeof(*data));
1252         ie->timestamp = jiffies;
1253         cache->timestamp = jiffies;
1254 
1255         if (ie->name_state == NAME_NOT_KNOWN)
1256                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1257 
1258 done:
1259         return flags;
1260 }
1261 
1262 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263 {
1264         struct discovery_state *cache = &hdev->discovery;
1265         struct inquiry_info *info = (struct inquiry_info *) buf;
1266         struct inquiry_entry *e;
1267         int copied = 0;
1268 
1269         list_for_each_entry(e, &cache->all, all) {
1270                 struct inquiry_data *data = &e->data;
1271 
1272                 if (copied >= num)
1273                         break;
1274 
1275                 bacpy(&info->bdaddr, &data->bdaddr);
1276                 info->pscan_rep_mode    = data->pscan_rep_mode;
1277                 info->pscan_period_mode = data->pscan_period_mode;
1278                 info->pscan_mode        = data->pscan_mode;
1279                 memcpy(info->dev_class, data->dev_class, 3);
1280                 info->clock_offset      = data->clock_offset;
1281 
1282                 info++;
1283                 copied++;
1284         }
1285 
1286         BT_DBG("cache %p, copied %d", cache, copied);
1287         return copied;
1288 }
1289 
1290 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1291 {
1292         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1293         struct hci_dev *hdev = req->hdev;
1294         struct hci_cp_inquiry cp;
1295 
1296         BT_DBG("%s", hdev->name);
1297 
1298         if (test_bit(HCI_INQUIRY, &hdev->flags))
1299                 return 0;
1300 
1301         /* Start Inquiry */
1302         memcpy(&cp.lap, &ir->lap, 3);
1303         cp.length  = ir->length;
1304         cp.num_rsp = ir->num_rsp;
1305         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1306 
1307         return 0;
1308 }
1309 
1310 int hci_inquiry(void __user *arg)
1311 {
1312         __u8 __user *ptr = arg;
1313         struct hci_inquiry_req ir;
1314         struct hci_dev *hdev;
1315         int err = 0, do_inquiry = 0, max_rsp;
1316         long timeo;
1317         __u8 *buf;
1318 
1319         if (copy_from_user(&ir, ptr, sizeof(ir)))
1320                 return -EFAULT;
1321 
1322         hdev = hci_dev_get(ir.dev_id);
1323         if (!hdev)
1324                 return -ENODEV;
1325 
1326         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1327                 err = -EBUSY;
1328                 goto done;
1329         }
1330 
1331         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1332                 err = -EOPNOTSUPP;
1333                 goto done;
1334         }
1335 
1336         if (hdev->dev_type != HCI_PRIMARY) {
1337                 err = -EOPNOTSUPP;
1338                 goto done;
1339         }
1340 
1341         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1342                 err = -EOPNOTSUPP;
1343                 goto done;
1344         }
1345 
1346         /* Restrict maximum inquiry length to 60 seconds */
1347         if (ir.length > 60) {
1348                 err = -EINVAL;
1349                 goto done;
1350         }
1351 
1352         hci_dev_lock(hdev);
1353         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1354             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1355                 hci_inquiry_cache_flush(hdev);
1356                 do_inquiry = 1;
1357         }
1358         hci_dev_unlock(hdev);
1359 
1360         timeo = ir.length * msecs_to_jiffies(2000);
1361 
1362         if (do_inquiry) {
1363                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1364                                    timeo, NULL);
1365                 if (err < 0)
1366                         goto done;
1367 
1368                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1369                  * cleared). If it is interrupted by a signal, return -EINTR.
1370                  */
1371                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1372                                 TASK_INTERRUPTIBLE)) {
1373                         err = -EINTR;
1374                         goto done;
1375                 }
1376         }
1377 
1378         /* For an unlimited number of responses, use a buffer with
1379          * 255 entries.
1380          */
1381         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1382 
1383         /* cache_dump can't sleep, so allocate a temporary buffer and
1384          * then copy it to user space.
1385          */
1386         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1387         if (!buf) {
1388                 err = -ENOMEM;
1389                 goto done;
1390         }
1391 
1392         hci_dev_lock(hdev);
1393         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1394         hci_dev_unlock(hdev);
1395 
1396         BT_DBG("num_rsp %d", ir.num_rsp);
1397 
1398         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1399                 ptr += sizeof(ir);
1400                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1401                                  ir.num_rsp))
1402                         err = -EFAULT;
1403         } else
1404                 err = -EFAULT;
1405 
1406         kfree(buf);
1407 
1408 done:
1409         hci_dev_put(hdev);
1410         return err;
1411 }
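/* hci_inquiry() backs the HCIINQUIRY ioctl on raw HCI sockets. A hedged
 * userspace sketch (error handling omitted; hci_socket_fd is a hypothetical
 * AF_BLUETOOTH/BTPROTO_HCI socket fd; ir.length is in 1.28 s units per the
 * specification, while the kernel above waits 2 s per unit as margin):
 *
 *   struct hci_inquiry_req *ir;
 *   char buf[sizeof(*ir) + 255 * sizeof(struct inquiry_info)];
 *
 *   ir = (struct hci_inquiry_req *)buf;
 *   memset(buf, 0, sizeof(buf));
 *   ir->dev_id  = 0;                      // hci0
 *   ir->flags   = IREQ_CACHE_FLUSH;
 *   ir->lap[0]  = 0x33;                   // GIAC 0x9e8b33,
 *   ir->lap[1]  = 0x8b;                   // little endian
 *   ir->lap[2]  = 0x9e;
 *   ir->length  = 8;                      // ~10.24 s of inquiry
 *   ir->num_rsp = 255;
 *   ioctl(hci_socket_fd, HCIINQUIRY, (unsigned long)buf);
 */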
1412 
1413 /**
1414  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1415  *                                     (BD_ADDR) for an HCI device from
1416  *                                     a firmware node property.
1417  * @hdev:       The HCI device
1418  *
1419  * Search the firmware node for 'local-bd-address'.
1420  *
1421  * All-zero BD addresses are rejected, because those could be properties
1422  * that exist in the firmware tables, but were not updated by the firmware. For
1423  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1424  */
1425 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1426 {
1427         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1428         bdaddr_t ba;
1429         int ret;
1430 
1431         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1432                                             (u8 *)&ba, sizeof(ba));
1433         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1434                 return;
1435 
1436         bacpy(&hdev->public_addr, &ba);
1437 }
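
/* Editorial note: the 'local-bd-address' property read above is a
 * documented devicetree binding; its six bytes are stored in little-endian
 * order, so a (hypothetical) address 00:11:22:33:44:55 would appear as
 *
 *	bluetooth {
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 *
 * An all-zero property is treated as "not provisioned" and skipped by the
 * bacmp(&ba, BDADDR_ANY) check.
 */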
1438 
1439 static int hci_dev_do_open(struct hci_dev *hdev)
1440 {
1441         int ret = 0;
1442 
1443         BT_DBG("%s %p", hdev->name, hdev);
1444 
1445         hci_req_sync_lock(hdev);
1446 
1447         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1448                 ret = -ENODEV;
1449                 goto done;
1450         }
1451 
1452         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1453             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1454                 /* Check for rfkill but allow the HCI setup stage to
1455                  * proceed (which in itself doesn't cause any RF activity).
1456                  */
1457                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1458                         ret = -ERFKILL;
1459                         goto done;
1460                 }
1461 
1462                 /* Check for valid public address or a configured static
1463                  * random address, but let the HCI setup proceed to
1464                  * be able to determine if there is a public address
1465                  * or not.
1466                  *
1467                  * In case of user channel usage, it is not important
1468                  * if a public address or static random address is
1469                  * available.
1470                  *
1471                  * This check is only valid for BR/EDR controllers
1472                  * since AMP controllers do not have an address.
1473                  */
1474                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1475                     hdev->dev_type == HCI_PRIMARY &&
1476                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1477                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1478                         ret = -EADDRNOTAVAIL;
1479                         goto done;
1480                 }
1481         }
1482 
1483         if (test_bit(HCI_UP, &hdev->flags)) {
1484                 ret = -EALREADY;
1485                 goto done;
1486         }
1487 
1488         if (hdev->open(hdev)) {
1489                 ret = -EIO;
1490                 goto done;
1491         }
1492 
1493         set_bit(HCI_RUNNING, &hdev->flags);
1494         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1495 
1496         atomic_set(&hdev->cmd_cnt, 1);
1497         set_bit(HCI_INIT, &hdev->flags);
1498 
1499         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1500             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1501                 bool invalid_bdaddr;
1502 
1503                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1504 
1505                 if (hdev->setup)
1506                         ret = hdev->setup(hdev);
1507 
1508                 /* The transport driver can set the quirk to mark the
1509                  * BD_ADDR invalid before creating the HCI device or in
1510                  * its setup callback.
1511                  */
1512                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1513                                           &hdev->quirks);
1514 
1515                 if (ret)
1516                         goto setup_failed;
1517 
1518                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1519                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1520                                 hci_dev_get_bd_addr_from_property(hdev);
1521 
1522                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1523                             hdev->set_bdaddr) {
1524                                 ret = hdev->set_bdaddr(hdev,
1525                                                        &hdev->public_addr);
1526 
1527                                 /* If setting of the BD_ADDR from the device
1528                                  * property succeeds, then treat the address
1529                                  * as valid even if the invalid BD_ADDR
1530                                  * quirk indicates otherwise.
1531                                  */
1532                                 if (!ret)
1533                                         invalid_bdaddr = false;
1534                         }
1535                 }
1536 
1537 setup_failed:
1538                 /* The transport driver can set these quirks before
1539                  * creating the HCI device or in its setup callback.
1540                  *
1541                  * For the invalid BD_ADDR quirk the address may still
1542                  * become valid if the bootloader provides it (see
1543                  * above).
1544                  *
1545                  * In case any of them is set, the controller has to
1546                  * start up as unconfigured.
1547                  */
1548                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1549                     invalid_bdaddr)
1550                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1551 
1552                 /* For an unconfigured controller it is required to
1553                  * read at least the version information provided by
1554                  * the Read Local Version Information command.
1555                  *
1556                  * If the set_bdaddr driver callback is provided, then
1557                  * also the original Bluetooth public device address
1558                  * will be read using the Read BD Address command.
1559                  */
1560                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1561                         ret = __hci_unconf_init(hdev);
1562         }
1563 
1564         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1565                 /* If public address change is configured, ensure that
1566                  * the address gets programmed. If the driver does not
1567                  * support changing the public address, fail the power
1568                  * on procedure.
1569                  */
1570                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1571                     hdev->set_bdaddr)
1572                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1573                 else
1574                         ret = -EADDRNOTAVAIL;
1575         }
1576 
1577         if (!ret) {
1578                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1579                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1580                         ret = __hci_init(hdev);
1581                         if (!ret && hdev->post_init)
1582                                 ret = hdev->post_init(hdev);
1583                 }
1584         }
1585 
1586         /* If the HCI Reset command is clearing all diagnostic settings,
1587          * then they need to be reprogrammed after the init procedure
1588          * completed.
1589          */
1590         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1591             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1592             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1593                 ret = hdev->set_diag(hdev, true);
1594 
1595         msft_do_open(hdev);
1596         aosp_do_open(hdev);
1597 
1598         clear_bit(HCI_INIT, &hdev->flags);
1599 
1600         if (!ret) {
1601                 hci_dev_hold(hdev);
1602                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1603                 hci_adv_instances_set_rpa_expired(hdev, true);
1604                 set_bit(HCI_UP, &hdev->flags);
1605                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1606                 hci_leds_update_powered(hdev, true);
1607                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1608                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1609                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1610                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1611                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1612                     hdev->dev_type == HCI_PRIMARY) {
1613                         ret = __hci_req_hci_power_on(hdev);
1614                         mgmt_power_on(hdev, ret);
1615                 }
1616         } else {
1617                 /* Init failed, cleanup */
1618                 flush_work(&hdev->tx_work);
1619 
1620                 /* Since hci_rx_work() may queue new cmd_work, it should
1621                  * be flushed first to avoid an unexpected call of
1622                  * hci_cmd_work()
1623                  */
1624                 flush_work(&hdev->rx_work);
1625                 flush_work(&hdev->cmd_work);
1626 
1627                 skb_queue_purge(&hdev->cmd_q);
1628                 skb_queue_purge(&hdev->rx_q);
1629 
1630                 if (hdev->flush)
1631                         hdev->flush(hdev);
1632 
1633                 if (hdev->sent_cmd) {
1634                         kfree_skb(hdev->sent_cmd);
1635                         hdev->sent_cmd = NULL;
1636                 }
1637 
1638                 clear_bit(HCI_RUNNING, &hdev->flags);
1639                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1640 
1641                 hdev->close(hdev);
1642                 hdev->flags &= BIT(HCI_RAW);
1643         }
1644 
1645 done:
1646         hci_req_sync_unlock(hdev);
1647         return ret;
1648 }
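
/* Example (editorial addition, not part of the kernel source): a sketch of
 * the driver-side hooks that hci_dev_do_open() walks through, roughly in
 * call order: open(), then setup() while HCI_SETUP is set, and close() on
 * the error path or at hci_dev_do_close(). All "foo_" names are
 * hypothetical; real drivers such as btusb wire these up before calling
 * hci_register_dev().
 */
static int foo_open(struct hci_dev *hdev)
{
	/* Power up the transport; no HCI traffic may be sent yet */
	return 0;
}

static int foo_close(struct hci_dev *hdev)
{
	/* Power the transport back down */
	return 0;
}

static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Hand the frame to the hardware; just consumed in this sketch */
	kfree_skb(skb);
	return 0;
}

static int foo_setup(struct hci_dev *hdev)
{
	/* Load firmware; may set HCI_QUIRK_INVALID_BDADDR to request a
	 * BD_ADDR from the firmware property or from user space.
	 */
	return 0;
}

static int foo_probe_sketch(void)
{
	struct hci_dev *hdev = hci_alloc_dev();
	int err;

	if (!hdev)
		return -ENOMEM;

	hdev->open  = foo_open;
	hdev->close = foo_close;
	hdev->send  = foo_send;
	hdev->setup = foo_setup;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}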
1649 
1650 /* ---- HCI ioctl helpers ---- */
1651 
1652 int hci_dev_open(__u16 dev)
1653 {
1654         struct hci_dev *hdev;
1655         int err;
1656 
1657         hdev = hci_dev_get(dev);
1658         if (!hdev)
1659                 return -ENODEV;
1660 
1661         /* Devices that are marked as unconfigured can only be powered
1662          * up as user channel. Trying to bring them up as normal devices
1663          * will result in a failure. Only user channel operation is
1664          * possible.
1665          *
1666          * When this function is called for a user channel, the flag
1667          * HCI_USER_CHANNEL will be set first before attempting to
1668          * open the device.
1669          */
1670         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1671             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1672                 err = -EOPNOTSUPP;
1673                 goto done;
1674         }
1675 
1676         /* We need to ensure that no other power on/off work is pending
1677          * before proceeding to call hci_dev_do_open. This is
1678          * particularly important if the setup procedure has not yet
1679          * completed.
1680          */
1681         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1682                 cancel_delayed_work(&hdev->power_off);
1683 
1684         /* After this call it is guaranteed that the setup procedure
1685          * has finished. This means that error conditions like RFKILL
1686          * or no valid public or static random address apply.
1687          */
1688         flush_workqueue(hdev->req_workqueue);
1689 
1690         /* For controllers not using the management interface and that
1691          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1692          * so that pairing works for them. Once the management interface
1693          * is in use this bit will be cleared again and userspace has
1694          * to explicitly enable it.
1695          */
1696         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1697             !hci_dev_test_flag(hdev, HCI_MGMT))
1698                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1699 
1700         err = hci_dev_do_open(hdev);
1701 
1702 done:
1703         hci_dev_put(hdev);
1704         return err;
1705 }
1706 
1707 /* This function requires the caller holds hdev->lock */
1708 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1709 {
1710         struct hci_conn_params *p;
1711 
1712         list_for_each_entry(p, &hdev->le_conn_params, list) {
1713                 if (p->conn) {
1714                         hci_conn_drop(p->conn);
1715                         hci_conn_put(p->conn);
1716                         p->conn = NULL;
1717                 }
1718                 list_del_init(&p->action);
1719         }
1720 
1721         BT_DBG("All LE pending actions cleared");
1722 }
1723 
1724 int hci_dev_do_close(struct hci_dev *hdev)
1725 {
1726         bool auto_off;
1727 
1728         BT_DBG("%s %p", hdev->name, hdev);
1729 
1730         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1731             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1732             test_bit(HCI_UP, &hdev->flags)) {
1733                 /* Execute vendor specific shutdown routine */
1734                 if (hdev->shutdown)
1735                         hdev->shutdown(hdev);
1736         }
1737 
1738         cancel_delayed_work(&hdev->power_off);
1739 
1740         hci_request_cancel_all(hdev);
1741         hci_req_sync_lock(hdev);
1742 
1743         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1744                 cancel_delayed_work_sync(&hdev->cmd_timer);
1745                 hci_req_sync_unlock(hdev);
1746                 return 0;
1747         }
1748 
1749         hci_leds_update_powered(hdev, false);
1750 
1751         /* Flush RX and TX work items */
1752         flush_work(&hdev->tx_work);
1753         flush_work(&hdev->rx_work);
1754 
1755         if (hdev->discov_timeout > 0) {
1756                 hdev->discov_timeout = 0;
1757                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1758                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1759         }
1760 
1761         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1762                 cancel_delayed_work(&hdev->service_cache);
1763 
1764         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1765                 struct adv_info *adv_instance;
1766 
1767                 cancel_delayed_work_sync(&hdev->rpa_expired);
1768 
1769                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1770                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1771         }
1772 
1773         /* Avoid potential lockdep warnings from the *_flush() calls by
1774          * ensuring the workqueue is empty up front.
1775          */
1776         drain_workqueue(hdev->workqueue);
1777 
1778         hci_dev_lock(hdev);
1779 
1780         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1781 
1782         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1783 
1784         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1785             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1786             hci_dev_test_flag(hdev, HCI_MGMT))
1787                 __mgmt_power_off(hdev);
1788 
1789         hci_inquiry_cache_flush(hdev);
1790         hci_pend_le_actions_clear(hdev);
1791         hci_conn_hash_flush(hdev);
1792         hci_dev_unlock(hdev);
1793 
1794         smp_unregister(hdev);
1795 
1796         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1797 
1798         aosp_do_close(hdev);
1799         msft_do_close(hdev);
1800 
1801         if (hdev->flush)
1802                 hdev->flush(hdev);
1803 
1804         /* Reset device */
1805         skb_queue_purge(&hdev->cmd_q);
1806         atomic_set(&hdev->cmd_cnt, 1);
1807         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1808             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1809                 set_bit(HCI_INIT, &hdev->flags);
1810                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1811                 clear_bit(HCI_INIT, &hdev->flags);
1812         }
1813 
1814         /* Flush cmd work */
1815         flush_work(&hdev->cmd_work);
1816 
1817         /* Drop queues */
1818         skb_queue_purge(&hdev->rx_q);
1819         skb_queue_purge(&hdev->cmd_q);
1820         skb_queue_purge(&hdev->raw_q);
1821 
1822         /* Drop last sent command */
1823         if (hdev->sent_cmd) {
1824                 cancel_delayed_work_sync(&hdev->cmd_timer);
1825                 kfree_skb(hdev->sent_cmd);
1826                 hdev->sent_cmd = NULL;
1827         }
1828 
1829         clear_bit(HCI_RUNNING, &hdev->flags);
1830         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1831 
1832         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1833                 wake_up(&hdev->suspend_wait_q);
1834 
1835         /* After this point our queues are empty
1836          * and no tasks are scheduled. */
1837         hdev->close(hdev);
1838 
1839         /* Clear flags */
1840         hdev->flags &= BIT(HCI_RAW);
1841         hci_dev_clear_volatile_flags(hdev);
1842 
1843         /* Controller radio is available but is currently powered down */
1844         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1845 
1846         memset(hdev->eir, 0, sizeof(hdev->eir));
1847         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1848         bacpy(&hdev->random_addr, BDADDR_ANY);
1849 
1850         hci_req_sync_unlock(hdev);
1851 
1852         hci_dev_put(hdev);
1853         return 0;
1854 }
1855 
1856 int hci_dev_close(__u16 dev)
1857 {
1858         struct hci_dev *hdev;
1859         int err;
1860 
1861         hdev = hci_dev_get(dev);
1862         if (!hdev)
1863                 return -ENODEV;
1864 
1865         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1866                 err = -EBUSY;
1867                 goto done;
1868         }
1869 
1870         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1871                 cancel_delayed_work(&hdev->power_off);
1872 
1873         err = hci_dev_do_close(hdev);
1874 
1875 done:
1876         hci_dev_put(hdev);
1877         return err;
1878 }
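
/* Example (editorial addition, not part of the kernel source): user-space
 * sketch of the legacy ioctl pair that ends up in hci_dev_open() and
 * hci_dev_close() above; roughly what "hciconfig hci0 up/down" does.
 * Assumes the BlueZ <bluetooth/hci.h> header.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int set_adapter_power(int dev_id, int up)
{
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	int err;

	if (ctl < 0)
		return -1;

	err = ioctl(ctl, up ? HCIDEVUP : HCIDEVDOWN, dev_id);
	close(ctl);
	return err;
}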
1879 
1880 static int hci_dev_do_reset(struct hci_dev *hdev)
1881 {
1882         int ret;
1883 
1884         BT_DBG("%s %p", hdev->name, hdev);
1885 
1886         hci_req_sync_lock(hdev);
1887 
1888         /* Drop queues */
1889         skb_queue_purge(&hdev->rx_q);
1890         skb_queue_purge(&hdev->cmd_q);
1891 
1892         /* Avoid potential lockdep warnings from the *_flush() calls by
1893          * ensuring the workqueue is empty up front.
1894          */
1895         drain_workqueue(hdev->workqueue);
1896 
1897         hci_dev_lock(hdev);
1898         hci_inquiry_cache_flush(hdev);
1899         hci_conn_hash_flush(hdev);
1900         hci_dev_unlock(hdev);
1901 
1902         if (hdev->flush)
1903                 hdev->flush(hdev);
1904 
1905         atomic_set(&hdev->cmd_cnt, 1);
1906         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1907 
1908         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1909 
1910         hci_req_sync_unlock(hdev);
1911         return ret;
1912 }
1913 
1914 int hci_dev_reset(__u16 dev)
1915 {
1916         struct hci_dev *hdev;
1917         int err;
1918 
1919         hdev = hci_dev_get(dev);
1920         if (!hdev)
1921                 return -ENODEV;
1922 
1923         if (!test_bit(HCI_UP, &hdev->flags)) {
1924                 err = -ENETDOWN;
1925                 goto done;
1926         }
1927 
1928         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1929                 err = -EBUSY;
1930                 goto done;
1931         }
1932 
1933         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1934                 err = -EOPNOTSUPP;
1935                 goto done;
1936         }
1937 
1938         err = hci_dev_do_reset(hdev);
1939 
1940 done:
1941         hci_dev_put(hdev);
1942         return err;
1943 }
1944 
1945 int hci_dev_reset_stat(__u16 dev)
1946 {
1947         struct hci_dev *hdev;
1948         int ret = 0;
1949 
1950         hdev = hci_dev_get(dev);
1951         if (!hdev)
1952                 return -ENODEV;
1953 
1954         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1955                 ret = -EBUSY;
1956                 goto done;
1957         }
1958 
1959         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1960                 ret = -EOPNOTSUPP;
1961                 goto done;
1962         }
1963 
1964         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1965 
1966 done:
1967         hci_dev_put(hdev);
1968         return ret;
1969 }
1970 
1971 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1972 {
1973         bool conn_changed, discov_changed;
1974 
1975         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1976 
1977         if ((scan & SCAN_PAGE))
1978                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1979                                                           HCI_CONNECTABLE);
1980         else
1981                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1982                                                            HCI_CONNECTABLE);
1983 
1984         if ((scan & SCAN_INQUIRY)) {
1985                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1986                                                             HCI_DISCOVERABLE);
1987         } else {
1988                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1989                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1990                                                              HCI_DISCOVERABLE);
1991         }
1992 
1993         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1994                 return;
1995 
1996         if (conn_changed || discov_changed) {
1997                 /* In case this was disabled through mgmt */
1998                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1999 
2000                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2001                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2002 
2003                 mgmt_new_settings(hdev);
2004         }
2005 }
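
/* Editorial note: the scan bits evaluated above come straight from the HCI
 * Write Scan Enable parameter (hci.h):
 *
 *	SCAN_DISABLED	0x00
 *	SCAN_INQUIRY	0x01	-> HCI_DISCOVERABLE
 *	SCAN_PAGE	0x02	-> HCI_CONNECTABLE
 *
 * so 0x03 makes the adapter both connectable and discoverable.
 */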
2006 
2007 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2008 {
2009         struct hci_dev *hdev;
2010         struct hci_dev_req dr;
2011         int err = 0;
2012 
2013         if (copy_from_user(&dr, arg, sizeof(dr)))
2014                 return -EFAULT;
2015 
2016         hdev = hci_dev_get(dr.dev_id);
2017         if (!hdev)
2018                 return -ENODEV;
2019 
2020         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2021                 err = -EBUSY;
2022                 goto done;
2023         }
2024 
2025         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2026                 err = -EOPNOTSUPP;
2027                 goto done;
2028         }
2029 
2030         if (hdev->dev_type != HCI_PRIMARY) {
2031                 err = -EOPNOTSUPP;
2032                 goto done;
2033         }
2034 
2035         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2036                 err = -EOPNOTSUPP;
2037                 goto done;
2038         }
2039 
2040         switch (cmd) {
2041         case HCISETAUTH:
2042                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2043                                    HCI_INIT_TIMEOUT, NULL);
2044                 break;
2045 
2046         case HCISETENCRYPT:
2047                 if (!lmp_encrypt_capable(hdev)) {
2048                         err = -EOPNOTSUPP;
2049                         break;
2050                 }
2051 
2052                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2053                         /* Auth must be enabled first */
2054                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2055                                            HCI_INIT_TIMEOUT, NULL);
2056                         if (err)
2057                                 break;
2058                 }
2059 
2060                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2061                                    HCI_INIT_TIMEOUT, NULL);
2062                 break;
2063 
2064         case HCISETSCAN:
2065                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2066                                    HCI_INIT_TIMEOUT, NULL);
2067 
2068                 /* Ensure that the connectable and discoverable states
2069                  * get correctly modified as this was a non-mgmt change.
2070                  */
2071                 if (!err)
2072                         hci_update_scan_state(hdev, dr.dev_opt);
2073                 break;
2074 
2075         case HCISETLINKPOL:
2076                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2077                                    HCI_INIT_TIMEOUT, NULL);
2078                 break;
2079 
2080         case HCISETLINKMODE:
2081                 hdev->link_mode = ((__u16) dr.dev_opt) &
2082                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2083                 break;
2084 
2085         case HCISETPTYPE:
2086                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2087                         break;
2088 
2089                 hdev->pkt_type = (__u16) dr.dev_opt;
2090                 mgmt_phy_configuration_changed(hdev, NULL);
2091                 break;
2092 
2093         case HCISETACLMTU:
2094                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2095                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2096                 break;
2097 
2098         case HCISETSCOMTU:
2099                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2100                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2101                 break;
2102 
2103         default:
2104                 err = -EINVAL;
2105                 break;
2106         }
2107 
2108 done:
2109         hci_dev_put(hdev);
2110         return err;
2111 }
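
/* Example (editorial addition, not part of the kernel source): user-space
 * sketch of one HCISET* ioctl dispatched above. Assumes the BlueZ headers;
 * HCISETSCAN with both scan bits makes the adapter connectable and
 * discoverable, which hci_update_scan_state() then mirrors into the
 * HCI_CONNECTABLE/HCI_DISCOVERABLE flags.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int make_discoverable(int dev_id)
{
	struct hci_dev_req dr = {
		.dev_id  = dev_id,
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
	};
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	int err;

	if (ctl < 0)
		return -1;

	err = ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
	close(ctl);
	return err;
}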
2112 
2113 int hci_get_dev_list(void __user *arg)
2114 {
2115         struct hci_dev *hdev;
2116         struct hci_dev_list_req *dl;
2117         struct hci_dev_req *dr;
2118         int n = 0, size, err;
2119         __u16 dev_num;
2120 
2121         if (get_user(dev_num, (__u16 __user *) arg))
2122                 return -EFAULT;
2123 
2124         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2125                 return -EINVAL;
2126 
2127         size = sizeof(*dl) + dev_num * sizeof(*dr);
2128 
2129         dl = kzalloc(size, GFP_KERNEL);
2130         if (!dl)
2131                 return -ENOMEM;
2132 
2133         dr = dl->dev_req;
2134 
2135         read_lock(&hci_dev_list_lock);
2136         list_for_each_entry(hdev, &hci_dev_list, list) {
2137                 unsigned long flags = hdev->flags;
2138 
2139                 /* When auto-off is configured the transport is actually
2140                  * running, but the device should still be reported as
2141                  * down.
2142                  */
2143                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2144                         flags &= ~BIT(HCI_UP);
2145 
2146                 (dr + n)->dev_id  = hdev->id;
2147                 (dr + n)->dev_opt = flags;
2148 
2149                 if (++n >= dev_num)
2150                         break;
2151         }
2152         read_unlock(&hci_dev_list_lock);
2153 
2154         dl->dev_num = n;
2155         size = sizeof(*dl) + n * sizeof(*dr);
2156 
2157         err = copy_to_user(arg, dl, size);
2158         kfree(dl);
2159 
2160         return err ? -EFAULT : 0;
2161 }
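
/* Example (editorial addition, not part of the kernel source): user-space
 * sketch of HCIGETDEVLIST as served by hci_get_dev_list() above. The
 * dev_num field is filled in first; the kernel then writes one hci_dev_req
 * per registered adapter back into the same buffer, with the adapter's
 * flags packed into dev_opt. Assumes the BlueZ headers.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static void list_adapters(void)
{
	struct hci_dev_list_req *dl;
	int ctl, i;

	ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (ctl < 0)
		return;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		goto out;

	dl->dev_num = HCI_MAX_DEV;
	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
			       dl->dev_req[i].dev_opt);

	free(dl);
out:
	close(ctl);
}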
2162 
2163 int hci_get_dev_info(void __user *arg)
2164 {
2165         struct hci_dev *hdev;
2166         struct hci_dev_info di;
2167         unsigned long flags;
2168         int err = 0;
2169 
2170         if (copy_from_user(&di, arg, sizeof(di)))
2171                 return -EFAULT;
2172 
2173         hdev = hci_dev_get(di.dev_id);
2174         if (!hdev)
2175                 return -ENODEV;
2176 
2177         /* When auto-off is configured the transport is actually
2178          * running, but the device should still be reported as
2179          * down.
2180          */
2181         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2182                 flags = hdev->flags & ~BIT(HCI_UP);
2183         else
2184                 flags = hdev->flags;
2185 
2186         strcpy(di.name, hdev->name);
2187         di.bdaddr   = hdev->bdaddr;
2188         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2189         di.flags    = flags;
2190         di.pkt_type = hdev->pkt_type;
2191         if (lmp_bredr_capable(hdev)) {
2192                 di.acl_mtu  = hdev->acl_mtu;
2193                 di.acl_pkts = hdev->acl_pkts;
2194                 di.sco_mtu  = hdev->sco_mtu;
2195                 di.sco_pkts = hdev->sco_pkts;
2196         } else {
2197                 di.acl_mtu  = hdev->le_mtu;
2198                 di.acl_pkts = hdev->le_pkts;
2199                 di.sco_mtu  = 0;
2200                 di.sco_pkts = 0;
2201         }
2202         di.link_policy = hdev->link_policy;
2203         di.link_mode   = hdev->link_mode;
2204 
2205         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2206         memcpy(&di.features, &hdev->features, sizeof(di.features));
2207 
2208         if (copy_to_user(arg, &di, sizeof(di)))
2209                 err = -EFAULT;
2210 
2211         hci_dev_put(hdev);
2212 
2213         return err;
2214 }
2215 
2216 /* ---- Interface to HCI drivers ---- */
2217 
2218 static int hci_rfkill_set_block(void *data, bool blocked)
2219 {
2220         struct hci_dev *hdev = data;
2221 
2222         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2223 
2224         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2225                 return -EBUSY;
2226 
2227         if (blocked) {
2228                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2229                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2230                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2231                         hci_dev_do_close(hdev);
2232         } else {
2233                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2234         }
2235 
2236         return 0;
2237 }
2238 
2239 static const struct rfkill_ops hci_rfkill_ops = {
2240         .set_block = hci_rfkill_set_block,
2241 };
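
/* Editorial note: hci_rfkill_ops is handed to rfkill_alloc() with
 * RFKILL_TYPE_BLUETOOTH when hci_register_dev() runs (later in this file),
 * so a soft or hard block from the rfkill subsystem lands in
 * hci_rfkill_set_block() above and closes the device unless it is still
 * in the setup/config stage.
 */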
2242 
2243 static void hci_power_on(struct work_struct *work)
2244 {
2245         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2246         int err;
2247 
2248         BT_DBG("%s", hdev->name);
2249 
2250         if (test_bit(HCI_UP, &hdev->flags) &&
2251             hci_dev_test_flag(hdev, HCI_MGMT) &&
2252             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2253                 cancel_delayed_work(&hdev->power_off);
2254                 hci_req_sync_lock(hdev);
2255                 err = __hci_req_hci_power_on(hdev);
2256                 hci_req_sync_unlock(hdev);
2257                 mgmt_power_on(hdev, err);
2258                 return;
2259         }
2260 
2261         err = hci_dev_do_open(hdev);
2262         if (err < 0) {
2263                 hci_dev_lock(hdev);
2264                 mgmt_set_powered_failed(hdev, err);
2265                 hci_dev_unlock(hdev);
2266                 return;
2267         }
2268 
2269         /* During the HCI setup phase, a few error conditions are
2270          * ignored and they need to be checked now. If they are still
2271          * valid, it is important to turn the device back off.
2272          */
2273         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2274             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2275             (hdev->dev_type == HCI_PRIMARY &&
2276              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2277              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2278                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2279                 hci_dev_do_close(hdev);
2280         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2281                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2282                                    HCI_AUTO_OFF_TIMEOUT);
2283         }
2284 
2285         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2286                 /* For unconfigured devices, set the HCI_RAW flag
2287                  * so that userspace can easily identify them.
2288                  */
2289                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2290                         set_bit(HCI_RAW, &hdev->flags);
2291 
2292                 /* For fully configured devices, this will send
2293                  * the Index Added event. For unconfigured devices,
2294                  * it will send the Unconfigured Index Added event.
2295                  *
2296                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2297                  * and no event will be sent.
2298                  */
2299                 mgmt_index_added(hdev);
2300         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2301                 /* When the controller is now configured, then it
2302                  * is important to clear the HCI_RAW flag.
2303                  */
2304                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2305                         clear_bit(HCI_RAW, &hdev->flags);
2306 
2307                 /* Powering on the controller with HCI_CONFIG set only
2308                  * happens with the transition from unconfigured to
2309                  * configured. This will send the Index Added event.
2310                  */
2311                 mgmt_index_added(hdev);
2312         }
2313 }
2314 
2315 static void hci_power_off(struct work_struct *work)
2316 {
2317         struct hci_dev *hdev = container_of(work, struct hci_dev,
2318                                             power_off.work);
2319 
2320         BT_DBG("%s", hdev->name);
2321 
2322         hci_dev_do_close(hdev);
2323 }
2324 
2325 static void hci_error_reset(struct work_struct *work)
2326 {
2327         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2328 
2329         BT_DBG("%s", hdev->name);
2330 
2331         if (hdev->hw_error)
2332                 hdev->hw_error(hdev, hdev->hw_error_code);
2333         else
2334                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2335 
2336         if (hci_dev_do_close(hdev))
2337                 return;
2338 
2339         hci_dev_do_open(hdev);
2340 }
2341 
2342 void hci_uuids_clear(struct hci_dev *hdev)
2343 {
2344         struct bt_uuid *uuid, *tmp;
2345 
2346         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2347                 list_del(&uuid->list);
2348                 kfree(uuid);
2349         }
2350 }
2351 
2352 void hci_link_keys_clear(struct hci_dev *hdev)
2353 {
2354         struct link_key *key;
2355 
2356         list_for_each_entry(key, &hdev->link_keys, list) {
2357                 list_del_rcu(&key->list);
2358                 kfree_rcu(key, rcu);
2359         }
2360 }
2361 
2362 void hci_smp_ltks_clear(struct hci_dev *hdev)
2363 {
2364         struct smp_ltk *k;
2365 
2366         list_for_each_entry(k, &hdev->long_term_keys, list) {
2367                 list_del_rcu(&k->list);
2368                 kfree_rcu(k, rcu);
2369         }
2370 }
2371 
2372 void hci_smp_irks_clear(struct hci_dev *hdev)
2373 {
2374         struct smp_irk *k;
2375 
2376         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2377                 list_del_rcu(&k->list);
2378                 kfree_rcu(k, rcu);
2379         }
2380 }
2381 
2382 void hci_blocked_keys_clear(struct hci_dev *hdev)
2383 {
2384         struct blocked_key *b;
2385 
2386         list_for_each_entry(b, &hdev->blocked_keys, list) {
2387                 list_del_rcu(&b->list);
2388                 kfree_rcu(b, rcu);
2389         }
2390 }
2391 
2392 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2393 {
2394         bool blocked = false;
2395         struct blocked_key *b;
2396 
2397         rcu_read_lock();
2398         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2399                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2400                         blocked = true;
2401                         break;
2402                 }
2403         }
2404 
2405         rcu_read_unlock();
2406         return blocked;
2407 }
2408 
2409 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2410 {
2411         struct link_key *k;
2412 
2413         rcu_read_lock();
2414         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2415                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2416                         rcu_read_unlock();
2417 
2418                         if (hci_is_blocked_key(hdev,
2419                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2420                                                k->val)) {
2421                                 bt_dev_warn_ratelimited(hdev,
2422                                                         "Link key blocked for %pMR",
2423                                                         &k->bdaddr);
2424                                 return NULL;
2425                         }
2426 
2427                         return k;
2428                 }
2429         }
2430         rcu_read_unlock();
2431 
2432         return NULL;
2433 }
2434 
2435 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2436                                u8 key_type, u8 old_key_type)
2437 {
2438         /* Legacy key */
2439         if (key_type < 0x03)
2440                 return true;
2441 
2442         /* Debug keys are insecure so don't store them persistently */
2443         if (key_type == HCI_LK_DEBUG_COMBINATION)
2444                 return false;
2445 
2446         /* Changed combination key and there's no previous one */
2447         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2448                 return false;
2449 
2450         /* Security mode 3 case */
2451         if (!conn)
2452                 return true;
2453 
2454         /* BR/EDR key derived using SC from an LE link */
2455         if (conn->type == LE_LINK)
2456                 return true;
2457 
2458         /* Neither local nor remote side requested no-bonding */
2459         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2460                 return true;
2461 
2462         /* Local side had dedicated bonding as requirement */
2463         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2464                 return true;
2465 
2466         /* Remote side had dedicated bonding as requirement */
2467         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2468                 return true;
2469 
2470         /* If none of the above criteria match, then don't store the key
2471          * persistently */
2472         return false;
2473 }
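
/* Editorial note: the auth_type/remote_auth values compared above are the
 * authentication requirements exchanged during IO capability negotiation
 * (Bluetooth Core Spec):
 *
 *	0x00/0x01  no bonding        (without/with MITM protection)
 *	0x02/0x03  dedicated bonding (without/with MITM protection)
 *	0x04/0x05  general bonding   (without/with MITM protection)
 *
 * so "> 0x01 on both sides" means some form of bonding was requested.
 */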
2474 
2475 static u8 ltk_role(u8 type)
2476 {
2477         if (type == SMP_LTK)
2478                 return HCI_ROLE_MASTER;
2479 
2480         return HCI_ROLE_SLAVE;
2481 }
2482 
2483 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2484                              u8 addr_type, u8 role)
2485 {
2486         struct smp_ltk *k;
2487 
2488         rcu_read_lock();
2489         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2490                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2491                         continue;
2492 
2493                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2494                         rcu_read_unlock();
2495 
2496                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2497                                                k->val)) {
2498                                 bt_dev_warn_ratelimited(hdev,
2499                                                         "LTK blocked for %pMR",
2500                                                         &k->bdaddr);
2501                                 return NULL;
2502                         }
2503 
2504                         return k;
2505                 }
2506         }
2507         rcu_read_unlock();
2508 
2509         return NULL;
2510 }
2511 
2512 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2513 {
2514         struct smp_irk *irk_to_return = NULL;
2515         struct smp_irk *irk;
2516 
2517         rcu_read_lock();
2518         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2519                 if (!bacmp(&irk->rpa, rpa)) {
2520                         irk_to_return = irk;
2521                         goto done;
2522                 }
2523         }
2524 
2525         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2526                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2527                         bacpy(&irk->rpa, rpa);
2528                         irk_to_return = irk;
2529                         goto done;
2530                 }
2531         }
2532 
2533 done:
2534         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2535                                                 irk_to_return->val)) {
2536                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2537                                         &irk_to_return->bdaddr);
2538                 irk_to_return = NULL;
2539         }
2540 
2541         rcu_read_unlock();
2542 
2543         return irk_to_return;
2544 }
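
/* Editorial note: a Resolvable Private Address is built as a 24-bit random
 * part (prand, top two bits 0b01) concatenated with a 24-bit hash, where
 * hash = ah(IRK, prand) per the Core Spec. smp_irk_matches() recomputes
 * ah() against each stored IRK; the irk->rpa field cached above simply
 * remembers the last address that resolved, so the common case stays a
 * cheap bacmp().
 */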
2545 
2546 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2547                                      u8 addr_type)
2548 {
2549         struct smp_irk *irk_to_return = NULL;
2550         struct smp_irk *irk;
2551 
2552         /* Identity Address must be public or static random */
2553         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2554                 return NULL;
2555 
2556         rcu_read_lock();
2557         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2558                 if (addr_type == irk->addr_type &&
2559                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2560                         irk_to_return = irk;
2561                         goto done;
2562                 }
2563         }
2564 
2565 done:
2566 
2567         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2568                                                 irk_to_return->val)) {
2569                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2570                                         &irk_to_return->bdaddr);
2571                 irk_to_return = NULL;
2572         }
2573 
2574         rcu_read_unlock();
2575 
2576         return irk_to_return;
2577 }
2578 
2579 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2580                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2581                                   u8 pin_len, bool *persistent)
2582 {
2583         struct link_key *key, *old_key;
2584         u8 old_key_type;
2585 
2586         old_key = hci_find_link_key(hdev, bdaddr);
2587         if (old_key) {
2588                 old_key_type = old_key->type;
2589                 key = old_key;
2590         } else {
2591                 old_key_type = conn ? conn->key_type : 0xff;
2592                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2593                 if (!key)
2594                         return NULL;
2595                 list_add_rcu(&key->list, &hdev->link_keys);
2596         }
2597 
2598         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2599 
2600         /* Some buggy controller combinations generate a changed
2601          * combination key for legacy pairing even when there's no
2602          * previous key */
2603         if (type == HCI_LK_CHANGED_COMBINATION &&
2604             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2605                 type = HCI_LK_COMBINATION;
2606                 if (conn)
2607                         conn->key_type = type;
2608         }
2609 
2610         bacpy(&key->bdaddr, bdaddr);
2611         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2612         key->pin_len = pin_len;
2613 
2614         if (type == HCI_LK_CHANGED_COMBINATION)
2615                 key->type = old_key_type;
2616         else
2617                 key->type = type;
2618 
2619         if (persistent)
2620                 *persistent = hci_persistent_key(hdev, conn, type,
2621                                                  old_key_type);
2622 
2623         return key;
2624 }
2625 
2626 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2627                             u8 addr_type, u8 type, u8 authenticated,
2628                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2629 {
2630         struct smp_ltk *key, *old_key;
2631         u8 role = ltk_role(type);
2632 
2633         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2634         if (old_key)
2635                 key = old_key;
2636         else {
2637                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2638                 if (!key)
2639                         return NULL;
2640                 list_add_rcu(&key->list, &hdev->long_term_keys);
2641         }
2642 
2643         bacpy(&key->bdaddr, bdaddr);
2644         key->bdaddr_type = addr_type;
2645         memcpy(key->val, tk, sizeof(key->val));
2646         key->authenticated = authenticated;
2647         key->ediv = ediv;
2648         key->rand = rand;
2649         key->enc_size = enc_size;
2650         key->type = type;
2651 
2652         return key;
2653 }
2654 
2655 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2656                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2657 {
2658         struct smp_irk *irk;
2659 
2660         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2661         if (!irk) {
2662                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2663                 if (!irk)
2664                         return NULL;
2665 
2666                 bacpy(&irk->bdaddr, bdaddr);
2667                 irk->addr_type = addr_type;
2668 
2669                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2670         }
2671 
2672         memcpy(irk->val, val, 16);
2673         bacpy(&irk->rpa, rpa);
2674 
2675         return irk;
2676 }
2677 
2678 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2679 {
2680         struct link_key *key;
2681 
2682         key = hci_find_link_key(hdev, bdaddr);
2683         if (!key)
2684                 return -ENOENT;
2685 
2686         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2687 
2688         list_del_rcu(&key->list);
2689         kfree_rcu(key, rcu);
2690 
2691         return 0;
2692 }
2693 
2694 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2695 {
2696         struct smp_ltk *k;
2697         int removed = 0;
2698 
2699         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2700                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2701                         continue;
2702 
2703                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2704 
2705                 list_del_rcu(&k->list);
2706                 kfree_rcu(k, rcu);
2707                 removed++;
2708         }
2709 
2710         return removed ? 0 : -ENOENT;
2711 }
2712 
2713 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2714 {
2715         struct smp_irk *k;
2716 
2717         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2718                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2719                         continue;
2720 
2721                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2722 
2723                 list_del_rcu(&k->list);
2724                 kfree_rcu(k, rcu);
2725         }
2726 }
2727 
2728 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2729 {
2730         struct smp_ltk *k;
2731         struct smp_irk *irk;
2732         u8 addr_type;
2733 
2734         if (type == BDADDR_BREDR) {
2735                 if (hci_find_link_key(hdev, bdaddr))
2736                         return true;
2737                 return false;
2738         }
2739 
2740         /* Convert to HCI addr type which struct smp_ltk uses */
2741         if (type == BDADDR_LE_PUBLIC)
2742                 addr_type = ADDR_LE_DEV_PUBLIC;
2743         else
2744                 addr_type = ADDR_LE_DEV_RANDOM;
2745 
2746         irk = hci_get_irk(hdev, bdaddr, addr_type);
2747         if (irk) {
2748                 bdaddr = &irk->bdaddr;
2749                 addr_type = irk->addr_type;
2750         }
2751 
2752         rcu_read_lock();
2753         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2754                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2755                         rcu_read_unlock();
2756                         return true;
2757                 }
2758         }
2759         rcu_read_unlock();
2760 
2761         return false;
2762 }
2763 
2764 /* HCI command timer function */
2765 static void hci_cmd_timeout(struct work_struct *work)
2766 {
2767         struct hci_dev *hdev = container_of(work, struct hci_dev,
2768                                             cmd_timer.work);
2769 
2770         if (hdev->sent_cmd) {
2771                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2772                 u16 opcode = __le16_to_cpu(sent->opcode);
2773 
2774                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2775         } else {
2776                 bt_dev_err(hdev, "command tx timeout");
2777         }
2778 
2779         if (hdev->cmd_timeout)
2780                 hdev->cmd_timeout(hdev);
2781 
2782         atomic_set(&hdev->cmd_cnt, 1);
2783         queue_work(hdev->workqueue, &hdev->cmd_work);
2784 }
2785 
2786 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2787                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2788 {
2789         struct oob_data *data;
2790 
2791         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2792                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2793                         continue;
2794                 if (data->bdaddr_type != bdaddr_type)
2795                         continue;
2796                 return data;
2797         }
2798 
2799         return NULL;
2800 }
2801 
2802 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2803                                u8 bdaddr_type)
2804 {
2805         struct oob_data *data;
2806 
2807         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2808         if (!data)
2809                 return -ENOENT;
2810 
2811         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2812 
2813         list_del(&data->list);
2814         kfree(data);
2815 
2816         return 0;
2817 }
2818 
2819 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2820 {
2821         struct oob_data *data, *n;
2822 
2823         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2824                 list_del(&data->list);
2825                 kfree(data);
2826         }
2827 }
2828 
2829 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2830                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2831                             u8 *hash256, u8 *rand256)
2832 {
2833         struct oob_data *data;
2834 
2835         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2836         if (!data) {
2837                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2838                 if (!data)
2839                         return -ENOMEM;
2840 
2841                 bacpy(&data->bdaddr, bdaddr);
2842                 data->bdaddr_type = bdaddr_type;
2843                 list_add(&data->list, &hdev->remote_oob_data);
2844         }
2845 
2846         if (hash192 && rand192) {
2847                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2848                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2849                 if (hash256 && rand256)
2850                         data->present = 0x03;
2851         } else {
2852                 memset(data->hash192, 0, sizeof(data->hash192));
2853                 memset(data->rand192, 0, sizeof(data->rand192));
2854                 if (hash256 && rand256)
2855                         data->present = 0x02;
2856                 else
2857                         data->present = 0x00;
2858         }
2859 
2860         if (hash256 && rand256) {
2861                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2862                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2863         } else {
2864                 memset(data->hash256, 0, sizeof(data->hash256));
2865                 memset(data->rand256, 0, sizeof(data->rand256));
2866                 if (hash192 && rand192)
2867                         data->present = 0x01;
2868         }
2869 
2870         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2871 
2872         return 0;
2873 }
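
/* Editorial note: data->present is a bitmask of which OOB blocks above are
 * valid: bit 0 for the P-192 hash192/rand192 pair and bit 1 for the P-256
 * hash256/rand256 pair, giving 0x01, 0x02 or 0x03 as set by the branches
 * above.
 */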
2874 
2875 /* This function requires the caller holds hdev->lock */
2876 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2877 {
2878         struct adv_info *adv_instance;
2879 
2880         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2881                 if (adv_instance->instance == instance)
2882                         return adv_instance;
2883         }
2884 
2885         return NULL;
2886 }
2887 
2888 /* This function requires the caller holds hdev->lock */
2889 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2890 {
2891         struct adv_info *cur_instance;
2892 
2893         cur_instance = hci_find_adv_instance(hdev, instance);
2894         if (!cur_instance)
2895                 return NULL;
2896 
2897         if (cur_instance == list_last_entry(&hdev->adv_instances,
2898                                             struct adv_info, list))
2899                 return list_first_entry(&hdev->adv_instances,
2900                                                  struct adv_info, list);
2901         else
2902                 return list_next_entry(cur_instance, list);
2903 }
2904 
2905 /* This function requires the caller holds hdev->lock */
2906 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2907 {
2908         struct adv_info *adv_instance;
2909 
2910         adv_instance = hci_find_adv_instance(hdev, instance);
2911         if (!adv_instance)
2912                 return -ENOENT;
2913 
2914         BT_DBG("%s removing instance %d", hdev->name, instance);
2915 
2916         if (hdev->cur_adv_instance == instance) {
2917                 if (hdev->adv_instance_timeout) {
2918                         cancel_delayed_work(&hdev->adv_instance_expire);
2919                         hdev->adv_instance_timeout = 0;
2920                 }
2921                 hdev->cur_adv_instance = 0x00;
2922         }
2923 
2924         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2925 
2926         list_del(&adv_instance->list);
2927         kfree(adv_instance);
2928 
2929         hdev->adv_instance_cnt--;
2930 
2931         return 0;
2932 }
2933 
2934 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2935 {
2936         struct adv_info *adv_instance, *n;
2937 
2938         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2939                 adv_instance->rpa_expired = rpa_expired;
2940 }
2941 
2942 /* This function requires the caller holds hdev->lock */
2943 void hci_adv_instances_clear(struct hci_dev *hdev)
2944 {
2945         struct adv_info *adv_instance, *n;
2946 
2947         if (hdev->adv_instance_timeout) {
2948                 cancel_delayed_work(&hdev->adv_instance_expire);
2949                 hdev->adv_instance_timeout = 0;
2950         }
2951 
2952         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2953                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2954                 list_del(&adv_instance->list);
2955                 kfree(adv_instance);
2956         }
2957 
2958         hdev->adv_instance_cnt = 0;
2959         hdev->cur_adv_instance = 0x00;
2960 }
2961 
2962 static void adv_instance_rpa_expired(struct work_struct *work)
2963 {
2964         struct adv_info *adv_instance = container_of(work, struct adv_info,
2965                                                      rpa_expired_cb.work);
2966 
2967         BT_DBG("");
2968 
2969         adv_instance->rpa_expired = true;
2970 }
2971 
2972 /* This function requires the caller holds hdev->lock */
2973 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2974                          u16 adv_data_len, u8 *adv_data,
2975                          u16 scan_rsp_len, u8 *scan_rsp_data,
2976                          u16 timeout, u16 duration, s8 tx_power,
2977                          u32 min_interval, u32 max_interval)
2978 {
2979         struct adv_info *adv_instance;
2980 
2981         adv_instance = hci_find_adv_instance(hdev, instance);
2982         if (adv_instance) {
2983                 memset(adv_instance->adv_data, 0,
2984                        sizeof(adv_instance->adv_data));
2985                 memset(adv_instance->scan_rsp_data, 0,
2986                        sizeof(adv_instance->scan_rsp_data));
2987         } else {
2988                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2989                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2990                         return -EOVERFLOW;
2991 
2992                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2993                 if (!adv_instance)
2994                         return -ENOMEM;
2995 
2996                 adv_instance->pending = true;
2997                 adv_instance->instance = instance;
2998                 list_add(&adv_instance->list, &hdev->adv_instances);
2999                 hdev->adv_instance_cnt++;
3000         }
3001 
3002         adv_instance->flags = flags;
3003         adv_instance->adv_data_len = adv_data_len;
3004         adv_instance->scan_rsp_len = scan_rsp_len;
3005         adv_instance->min_interval = min_interval;
3006         adv_instance->max_interval = max_interval;
3007         adv_instance->tx_power = tx_power;
3008 
3009         if (adv_data_len)
3010                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3011 
3012         if (scan_rsp_len)
3013                 memcpy(adv_instance->scan_rsp_data,
3014                        scan_rsp_data, scan_rsp_len);
3015 
3016         adv_instance->timeout = timeout;
3017         adv_instance->remaining_time = timeout;
3018 
3019         if (duration == 0)
3020                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3021         else
3022                 adv_instance->duration = duration;
3023 
3024         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3025                           adv_instance_rpa_expired);
3026 
3027         BT_DBG("%s for %d", hdev->name, instance);
3028 
3029         return 0;
3030 }
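
/* Editor's illustrative sketch: adding an instance with empty data and
 * the controller defaults. The instance number 0x01 and the use of
 * HCI_ADV_TX_POWER_NO_PREFERENCE are placeholder choices, not values
 * mandated by this file. Must be called with hdev->lock held.
 */
static int example_add_default_instance(struct hci_dev *hdev)
{
	return hci_add_adv_instance(hdev, 0x01, 0, 0, NULL, 0, NULL,
				    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				    hdev->le_adv_min_interval,
				    hdev->le_adv_max_interval);
}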
3031 
3032 /* This function requires the caller holds hdev->lock */
3033 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3034                               u16 adv_data_len, u8 *adv_data,
3035                               u16 scan_rsp_len, u8 *scan_rsp_data)
3036 {
3037         struct adv_info *adv_instance;
3038 
3039         adv_instance = hci_find_adv_instance(hdev, instance);
3040 
3041         /* If advertisement doesn't exist, we can't modify its data */
3042         if (!adv_instance)
3043                 return -ENOENT;
3044 
3045         if (adv_data_len) {
3046                 memset(adv_instance->adv_data, 0,
3047                        sizeof(adv_instance->adv_data));
3048                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3049                 adv_instance->adv_data_len = adv_data_len;
3050         }
3051 
3052         if (scan_rsp_len) {
3053                 memset(adv_instance->scan_rsp_data, 0,
3054                        sizeof(adv_instance->scan_rsp_data));
3055                 memcpy(adv_instance->scan_rsp_data,
3056                        scan_rsp_data, scan_rsp_len);
3057                 adv_instance->scan_rsp_len = scan_rsp_len;
3058         }
3059 
3060         return 0;
3061 }
3062 
3063 /* This function requires the caller holds hdev->lock */
3064 void hci_adv_monitors_clear(struct hci_dev *hdev)
3065 {
3066         struct adv_monitor *monitor;
3067         int handle;
3068 
3069         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3070                 hci_free_adv_monitor(hdev, monitor);
3071 
3072         idr_destroy(&hdev->adv_monitors_idr);
3073 }
3074 
3075 /* Frees the monitor structure and does some bookkeeping.
3076  * This function requires the caller holds hdev->lock.
3077  */
3078 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3079 {
3080         struct adv_pattern *pattern;
3081         struct adv_pattern *tmp;
3082 
3083         if (!monitor)
3084                 return;
3085 
3086         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3087                 list_del(&pattern->list);
3088                 kfree(pattern);
3089         }
3090 
3091         if (monitor->handle)
3092                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3093 
3094         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3095                 hdev->adv_monitors_cnt--;
3096                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3097         }
3098 
3099         kfree(monitor);
3100 }
3101 
3102 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3103 {
3104         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3105 }
3106 
3107 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3108 {
3109         return mgmt_remove_adv_monitor_complete(hdev, status);
3110 }
3111 
3112 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3113  * also attempts to forward the request to the controller.
3114  * Returns true if request is forwarded (result is pending), false otherwise.
3115  * This function requires the caller holds hdev->lock.
3116  */
3117 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3118                          int *err)
3119 {
3120         int min, max, handle;
3121 
3122         *err = 0;
3123 
3124         if (!monitor) {
3125                 *err = -EINVAL;
3126                 return false;
3127         }
3128 
3129         min = HCI_MIN_ADV_MONITOR_HANDLE;
3130         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3131         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3132                            GFP_KERNEL);
3133         if (handle < 0) {
3134                 *err = handle;
3135                 return false;
3136         }
3137 
3138         monitor->handle = handle;
3139 
3140         if (!hdev_is_powered(hdev))
3141                 return false;
3142 
3143         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3144         case HCI_ADV_MONITOR_EXT_NONE:
3145                 hci_update_background_scan(hdev);
3146                 bt_dev_dbg(hdev, "add monitor status %d", *err);
3147                 /* Message was not forwarded to controller - not an error */
3148                 return false;
3149         case HCI_ADV_MONITOR_EXT_MSFT:
3150                 *err = msft_add_monitor_pattern(hdev, monitor);
3151                 bt_dev_dbg(hdev, "add monitor msft status %d", *err);
3153                 break;
3154         }
3155 
3156         return (*err == 0);
3157 }
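
/* Editor's illustrative sketch: registering a freshly allocated monitor.
 * Freeing on a synchronous error is an assumption about the caller's
 * contract (mirroring mgmt), not code taken from this file.
 */
static int example_register_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int err;

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		return -ENOMEM;

	INIT_LIST_HEAD(&monitor->patterns);

	hci_dev_lock(hdev);
	/* Returns true when the result is pending on the controller. */
	if (!hci_add_adv_monitor(hdev, monitor, &err) && err)
		hci_free_adv_monitor(hdev, monitor);
	hci_dev_unlock(hdev);

	return err;
}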
3158 
3159 /* Attempts to remove the monitor from the controller and frees it. If
3160  * the controller has no matching handle, the monitor is freed anyway.
3161  * Returns true if request is forwarded (result is pending), false otherwise.
3162  * This function requires the caller holds hdev->lock.
3163  */
3164 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3165                                    struct adv_monitor *monitor,
3166                                    u16 handle, int *err)
3167 {
3168         *err = 0;
3169 
3170         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3171         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3172                 goto free_monitor;
3173         case HCI_ADV_MONITOR_EXT_MSFT:
3174                 *err = msft_remove_monitor(hdev, monitor, handle);
3175                 break;
3176         }
3177 
3178         /* If no matching handle is registered, just free the monitor */
3179         if (*err == -ENOENT)
3180                 goto free_monitor;
3181 
3182         return (*err == 0);
3183 
3184 free_monitor:
3185         if (*err == -ENOENT)
3186                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3187                             monitor->handle);
3188         hci_free_adv_monitor(hdev, monitor);
3189 
3190         *err = 0;
3191         return false;
3192 }
3193 
3194 /* Returns true if request is forwarded (result is pending), false otherwise.
3195  * This function requires the caller holds hdev->lock.
3196  */
3197 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3198 {
3199         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3200         bool pending;
3201 
3202         if (!monitor) {
3203                 *err = -EINVAL;
3204                 return false;
3205         }
3206 
3207         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3208         if (!*err && !pending)
3209                 hci_update_background_scan(hdev);
3210 
3211         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3212                    handle, *err, pending ? "" : "not ");
3213 
3214         return pending;
3215 }
3216 
3217 /* Returns true if request is forwarded (result is pending), false otherwise.
3218  * This function requires the caller holds hdev->lock.
3219  */
3220 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3221 {
3222         struct adv_monitor *monitor;
3223         int idr_next_id = 0;
3224         bool pending = false;
3225         bool update = false;
3226 
3227         *err = 0;
3228 
3229         while (!*err && !pending) {
3230                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3231                 if (!monitor)
3232                         break;
3233 
3234                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3235 
3236                 if (!*err && !pending)
3237                         update = true;
3238         }
3239 
3240         if (update)
3241                 hci_update_background_scan(hdev);
3242 
3243         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3244                    *err, pending ? "" : "not ");
3245 
3246         return pending;
3247 }
3248 
3249 /* This function requires the caller holds hdev->lock */
3250 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3251 {
3252         return !idr_is_empty(&hdev->adv_monitors_idr);
3253 }
3254 
3255 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3256 {
3257         if (msft_monitor_supported(hdev))
3258                 return HCI_ADV_MONITOR_EXT_MSFT;
3259 
3260         return HCI_ADV_MONITOR_EXT_NONE;
3261 }
3262 
3263 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3264                                          bdaddr_t *bdaddr, u8 type)
3265 {
3266         struct bdaddr_list *b;
3267 
3268         list_for_each_entry(b, bdaddr_list, list) {
3269                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3270                         return b;
3271         }
3272 
3273         return NULL;
3274 }
3275 
3276 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3277                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3278                                 u8 type)
3279 {
3280         struct bdaddr_list_with_irk *b;
3281 
3282         list_for_each_entry(b, bdaddr_list, list) {
3283                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3284                         return b;
3285         }
3286 
3287         return NULL;
3288 }
3289 
3290 struct bdaddr_list_with_flags *
3291 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3292                                   bdaddr_t *bdaddr, u8 type)
3293 {
3294         struct bdaddr_list_with_flags *b;
3295 
3296         list_for_each_entry(b, bdaddr_list, list) {
3297                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3298                         return b;
3299         }
3300 
3301         return NULL;
3302 }
3303 
3304 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3305 {
3306         struct bdaddr_list *b, *n;
3307 
3308         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3309                 list_del(&b->list);
3310                 kfree(b);
3311         }
3312 }
3313 
3314 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3315 {
3316         struct bdaddr_list *entry;
3317 
3318         if (!bacmp(bdaddr, BDADDR_ANY))
3319                 return -EBADF;
3320 
3321         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3322                 return -EEXIST;
3323 
3324         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3325         if (!entry)
3326                 return -ENOMEM;
3327 
3328         bacpy(&entry->bdaddr, bdaddr);
3329         entry->bdaddr_type = type;
3330 
3331         list_add(&entry->list, list);
3332 
3333         return 0;
3334 }
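
/* Editor's illustrative sketch: the add/lookup/del helpers behave as a
 * set keyed by (bdaddr, type). The list head and peer address are
 * supplied by the caller; BDADDR_LE_PUBLIC is a placeholder type.
 */
static void example_bdaddr_list_usage(struct list_head *list, bdaddr_t *peer)
{
	if (!hci_bdaddr_list_add(list, peer, BDADDR_LE_PUBLIC)) {
		WARN_ON(!hci_bdaddr_list_lookup(list, peer, BDADDR_LE_PUBLIC));
		hci_bdaddr_list_del(list, peer, BDADDR_LE_PUBLIC);
	}
}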
3335 
3336 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3337                                         u8 type, u8 *peer_irk, u8 *local_irk)
3338 {
3339         struct bdaddr_list_with_irk *entry;
3340 
3341         if (!bacmp(bdaddr, BDADDR_ANY))
3342                 return -EBADF;
3343 
3344         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3345                 return -EEXIST;
3346 
3347         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3348         if (!entry)
3349                 return -ENOMEM;
3350 
3351         bacpy(&entry->bdaddr, bdaddr);
3352         entry->bdaddr_type = type;
3353 
3354         if (peer_irk)
3355                 memcpy(entry->peer_irk, peer_irk, 16);
3356 
3357         if (local_irk)
3358                 memcpy(entry->local_irk, local_irk, 16);
3359 
3360         list_add(&entry->list, list);
3361 
3362         return 0;
3363 }
3364 
3365 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3366                                    u8 type, u32 flags)
3367 {
3368         struct bdaddr_list_with_flags *entry;
3369 
3370         if (!bacmp(bdaddr, BDADDR_ANY))
3371                 return -EBADF;
3372 
3373         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3374                 return -EEXIST;
3375 
3376         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3377         if (!entry)
3378                 return -ENOMEM;
3379 
3380         bacpy(&entry->bdaddr, bdaddr);
3381         entry->bdaddr_type = type;
3382         entry->current_flags = flags;
3383 
3384         list_add(&entry->list, list);
3385 
3386         return 0;
3387 }
3388 
3389 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3390 {
3391         struct bdaddr_list *entry;
3392 
3393         if (!bacmp(bdaddr, BDADDR_ANY)) {
3394                 hci_bdaddr_list_clear(list);
3395                 return 0;
3396         }
3397 
3398         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3399         if (!entry)
3400                 return -ENOENT;
3401 
3402         list_del(&entry->list);
3403         kfree(entry);
3404 
3405         return 0;
3406 }
3407 
3408 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3409                                                         u8 type)
3410 {
3411         struct bdaddr_list_with_irk *entry;
3412 
3413         if (!bacmp(bdaddr, BDADDR_ANY)) {
3414                 hci_bdaddr_list_clear(list);
3415                 return 0;
3416         }
3417 
3418         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3419         if (!entry)
3420                 return -ENOENT;
3421 
3422         list_del(&entry->list);
3423         kfree(entry);
3424 
3425         return 0;
3426 }
3427 
3428 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3429                                    u8 type)
3430 {
3431         struct bdaddr_list_with_flags *entry;
3432 
3433         if (!bacmp(bdaddr, BDADDR_ANY)) {
3434                 hci_bdaddr_list_clear(list);
3435                 return 0;
3436         }
3437 
3438         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3439         if (!entry)
3440                 return -ENOENT;
3441 
3442         list_del(&entry->list);
3443         kfree(entry);
3444 
3445         return 0;
3446 }
3447 
3448 /* This function requires the caller holds hdev->lock */
3449 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3450                                                bdaddr_t *addr, u8 addr_type)
3451 {
3452         struct hci_conn_params *params;
3453 
3454         list_for_each_entry(params, &hdev->le_conn_params, list) {
3455                 if (bacmp(&params->addr, addr) == 0 &&
3456                     params->addr_type == addr_type) {
3457                         return params;
3458                 }
3459         }
3460 
3461         return NULL;
3462 }
3463 
3464 /* This function requires the caller holds hdev->lock */
3465 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3466                                                   bdaddr_t *addr, u8 addr_type)
3467 {
3468         struct hci_conn_params *param;
3469 
3470         switch (addr_type) {
3471         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3472                 addr_type = ADDR_LE_DEV_PUBLIC;
3473                 break;
3474         case ADDR_LE_DEV_RANDOM_RESOLVED:
3475                 addr_type = ADDR_LE_DEV_RANDOM;
3476                 break;
3477         }
3478 
3479         list_for_each_entry(param, list, action) {
3480                 if (bacmp(&param->addr, addr) == 0 &&
3481                     param->addr_type == addr_type)
3482                         return param;
3483         }
3484 
3485         return NULL;
3486 }
3487 
3488 /* This function requires the caller holds hdev->lock */
3489 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3490                                             bdaddr_t *addr, u8 addr_type)
3491 {
3492         struct hci_conn_params *params;
3493 
3494         params = hci_conn_params_lookup(hdev, addr, addr_type);
3495         if (params)
3496                 return params;
3497 
3498         params = kzalloc(sizeof(*params), GFP_KERNEL);
3499         if (!params) {
3500                 bt_dev_err(hdev, "out of memory");
3501                 return NULL;
3502         }
3503 
3504         bacpy(&params->addr, addr);
3505         params->addr_type = addr_type;
3506 
3507         list_add(&params->list, &hdev->le_conn_params);
3508         INIT_LIST_HEAD(&params->action);
3509 
3510         params->conn_min_interval = hdev->le_conn_min_interval;
3511         params->conn_max_interval = hdev->le_conn_max_interval;
3512         params->conn_latency = hdev->le_conn_latency;
3513         params->supervision_timeout = hdev->le_supv_timeout;
3514         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3515 
3516         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3517 
3518         return params;
3519 }
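
/* Editor's illustrative sketch: marking a peer for background
 * autoconnect. Everything except the hci_conn_params_add() contract
 * (call with hdev->lock held, NULL on allocation failure) is a
 * placeholder choice.
 */
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
	if (!params)
		return -ENOMEM;

	params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	return 0;
}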
3520 
3521 static void hci_conn_params_free(struct hci_conn_params *params)
3522 {
3523         if (params->conn) {
3524                 hci_conn_drop(params->conn);
3525                 hci_conn_put(params->conn);
3526         }
3527 
3528         list_del(&params->action);
3529         list_del(&params->list);
3530         kfree(params);
3531 }
3532 
3533 /* This function requires the caller holds hdev->lock */
3534 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3535 {
3536         struct hci_conn_params *params;
3537 
3538         params = hci_conn_params_lookup(hdev, addr, addr_type);
3539         if (!params)
3540                 return;
3541 
3542         hci_conn_params_free(params);
3543 
3544         hci_update_background_scan(hdev);
3545 
3546         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3547 }
3548 
3549 /* This function requires the caller holds hdev->lock */
3550 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3551 {
3552         struct hci_conn_params *params, *tmp;
3553 
3554         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3555                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3556                         continue;
3557 
3558                 /* If trying to establish a one-time connection to a
3559                  * disabled device, leave the params but mark them explicit-connect only.
3560                  */
3561                 if (params->explicit_connect) {
3562                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3563                         continue;
3564                 }
3565 
3566                 list_del(&params->list);
3567                 kfree(params);
3568         }
3569 
3570         BT_DBG("All LE disabled connection parameters were removed");
3571 }
3572 
3573 /* This function requires the caller holds hdev->lock */
3574 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3575 {
3576         struct hci_conn_params *params, *tmp;
3577 
3578         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3579                 hci_conn_params_free(params);
3580 
3581         BT_DBG("All LE connection parameters were removed");
3582 }
3583 
3584 /* Copy the Identity Address of the controller.
3585  *
3586  * If the controller has a public BD_ADDR, then by default use that one.
3587  * If this is a LE only controller without a public address, default to
3588  * the static random address.
3589  *
3590  * For debugging purposes it is possible to force controllers with a
3591  * public address to use the static random address instead.
3592  *
3593  * In case BR/EDR has been disabled on a dual-mode controller and
3594  * userspace has configured a static address, then that address
3595  * becomes the identity address instead of the public BR/EDR address.
3596  */
3597 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3598                                u8 *bdaddr_type)
3599 {
3600         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3601             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3602             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3603              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3604                 bacpy(bdaddr, &hdev->static_addr);
3605                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3606         } else {
3607                 bacpy(bdaddr, &hdev->bdaddr);
3608                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3609         }
3610 }
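
/* Editor's illustrative sketch: consuming the copy helper above, e.g.
 * when an address and its type are needed for an HCI command payload.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	bt_dev_dbg(hdev, "identity %pMR (type %u)", &bdaddr, bdaddr_type);
}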
3611 
3612 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3613 {
3614         int i;
3615 
3616         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3617                 clear_bit(i, hdev->suspend_tasks);
3618 
3619         wake_up(&hdev->suspend_wait_q);
3620 }
3621 
3622 static int hci_suspend_wait_event(struct hci_dev *hdev)
3623 {
3624 #define WAKE_COND                                                              \
3625         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3626          __SUSPEND_NUM_TASKS)
3627 
3628         int i;
3629         int ret = wait_event_timeout(hdev->suspend_wait_q,
3630                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3631 
3632         if (ret == 0) {
3633                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3634                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3635                         if (test_bit(i, hdev->suspend_tasks))
3636                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3637                         clear_bit(i, hdev->suspend_tasks);
3638                 }
3639 
3640                 ret = -ETIMEDOUT;
3641         } else {
3642                 ret = 0;
3643         }
3644 
3645         return ret;
3646 }
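
/* Editor's illustrative sketch: the producer side of the handshake
 * above. An event handler clears its task bit and wakes the queue;
 * WAKE_COND becomes true once every bit is clear. The pairing of
 * SUSPEND_SCAN_DISABLE with a scan-disable completion is an assumed
 * example, not code from this file.
 */
static void example_complete_suspend_task(struct hci_dev *hdev)
{
	if (test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);
}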
3647 
3648 static void hci_prepare_suspend(struct work_struct *work)
3649 {
3650         struct hci_dev *hdev =
3651                 container_of(work, struct hci_dev, suspend_prepare);
3652 
3653         hci_dev_lock(hdev);
3654         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3655         hci_dev_unlock(hdev);
3656 }
3657 
3658 static int hci_change_suspend_state(struct hci_dev *hdev,
3659                                     enum suspended_state next)
3660 {
3661         hdev->suspend_state_next = next;
3662         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3663         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3664         return hci_suspend_wait_event(hdev);
3665 }
3666 
3667 static void hci_clear_wake_reason(struct hci_dev *hdev)
3668 {
3669         hci_dev_lock(hdev);
3670 
3671         hdev->wake_reason = 0;
3672         bacpy(&hdev->wake_addr, BDADDR_ANY);
3673         hdev->wake_addr_type = 0;
3674 
3675         hci_dev_unlock(hdev);
3676 }
3677 
3678 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3679                                 void *data)
3680 {
3681         struct hci_dev *hdev =
3682                 container_of(nb, struct hci_dev, suspend_notifier);
3683         int ret = 0;
3684         u8 state = BT_RUNNING;
3685 
3686         /* If powering down, wait for completion. */
3687         if (mgmt_powering_down(hdev)) {
3688                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3689                 ret = hci_suspend_wait_event(hdev);
3690                 if (ret)
3691                         goto done;
3692         }
3693 
3694         /* Suspend notifier should only act on events when powered. */
3695         if (!hdev_is_powered(hdev) ||
3696             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3697                 goto done;
3698 
3699         if (action == PM_SUSPEND_PREPARE) {
3700                 /* Suspend consists of two actions:
3701                  *  - First, disconnect everything and make the controller not
3702                  *    connectable (disabling scanning)
3703                  *  - Second, program event filter/whitelist and enable scan
3704                  */
3705                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3706                 if (!ret)
3707                         state = BT_SUSPEND_DISCONNECT;
3708 
3709                 /* Only configure whitelist if disconnect succeeded and wake
3710                  * isn't being prevented.
3711                  */
3712                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3713                         ret = hci_change_suspend_state(hdev,
3714                                                 BT_SUSPEND_CONFIGURE_WAKE);
3715                         if (!ret)
3716                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3717                 }
3718 
3719                 hci_clear_wake_reason(hdev);
3720                 mgmt_suspending(hdev, state);
3721 
3722         } else if (action == PM_POST_SUSPEND) {
3723                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3724 
3725                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3726                               hdev->wake_addr_type);
3727         }
3728 
3729 done:
3730         /* We always allow suspend even if suspend preparation failed and
3731          * attempt to recover in resume.
3732          */
3733         if (ret)
3734                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3735                            action, ret);
3736 
3737         return NOTIFY_DONE;
3738 }
3739 
3740 /* Alloc HCI device */
3741 struct hci_dev *hci_alloc_dev(void)
3742 {
3743         struct hci_dev *hdev;
3744 
3745         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3746         if (!hdev)
3747                 return NULL;
3748 
3749         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3750         hdev->esco_type = (ESCO_HV1);
3751         hdev->link_mode = (HCI_LM_ACCEPT);
3752         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3753         hdev->io_capability = 0x03;     /* No Input No Output */
3754         hdev->manufacturer = 0xffff;    /* Default to internal use */
3755         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3756         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3757         hdev->adv_instance_cnt = 0;
3758         hdev->cur_adv_instance = 0x00;
3759         hdev->adv_instance_timeout = 0;
3760 
3761         hdev->advmon_allowlist_duration = 300;
3762         hdev->advmon_no_filter_duration = 500;
3763         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3764 
3765         hdev->sniff_max_interval = 800;
3766         hdev->sniff_min_interval = 80;
3767 
3768         hdev->le_adv_channel_map = 0x07;
3769         hdev->le_adv_min_interval = 0x0800;
3770         hdev->le_adv_max_interval = 0x0800;
3771         hdev->le_scan_interval = 0x0060;
3772         hdev->le_scan_window = 0x0030;
3773         hdev->le_scan_int_suspend = 0x0400;
3774         hdev->le_scan_window_suspend = 0x0012;
3775         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3776         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3777         hdev->le_scan_int_adv_monitor = 0x0060;
3778         hdev->le_scan_window_adv_monitor = 0x0030;
3779         hdev->le_scan_int_connect = 0x0060;
3780         hdev->le_scan_window_connect = 0x0060;
3781         hdev->le_conn_min_interval = 0x0018;
3782         hdev->le_conn_max_interval = 0x0028;
3783         hdev->le_conn_latency = 0x0000;
3784         hdev->le_supv_timeout = 0x002a;
3785         hdev->le_def_tx_len = 0x001b;
3786         hdev->le_def_tx_time = 0x0148;
3787         hdev->le_max_tx_len = 0x001b;
3788         hdev->le_max_tx_time = 0x0148;
3789         hdev->le_max_rx_len = 0x001b;
3790         hdev->le_max_rx_time = 0x0148;
3791         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3792         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3793         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3794         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3795         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3796         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3797         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3798         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3799         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3800 
3801         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3802         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3803         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3804         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3805         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3806         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3807 
3808         /* default 1.28 sec page scan */
3809         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3810         hdev->def_page_scan_int = 0x0800;
3811         hdev->def_page_scan_window = 0x0012;
3812 
3813         mutex_init(&hdev->lock);
3814         mutex_init(&hdev->req_lock);
3815 
3816         INIT_LIST_HEAD(&hdev->mgmt_pending);
3817         INIT_LIST_HEAD(&hdev->blacklist);
3818         INIT_LIST_HEAD(&hdev->whitelist);
3819         INIT_LIST_HEAD(&hdev->uuids);
3820         INIT_LIST_HEAD(&hdev->link_keys);
3821         INIT_LIST_HEAD(&hdev->long_term_keys);
3822         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3823         INIT_LIST_HEAD(&hdev->remote_oob_data);
3824         INIT_LIST_HEAD(&hdev->le_white_list);
3825         INIT_LIST_HEAD(&hdev->le_resolv_list);
3826         INIT_LIST_HEAD(&hdev->le_conn_params);
3827         INIT_LIST_HEAD(&hdev->pend_le_conns);
3828         INIT_LIST_HEAD(&hdev->pend_le_reports);
3829         INIT_LIST_HEAD(&hdev->conn_hash.list);
3830         INIT_LIST_HEAD(&hdev->adv_instances);
3831         INIT_LIST_HEAD(&hdev->blocked_keys);
3832 
3833         INIT_WORK(&hdev->rx_work, hci_rx_work);
3834         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3835         INIT_WORK(&hdev->tx_work, hci_tx_work);
3836         INIT_WORK(&hdev->power_on, hci_power_on);
3837         INIT_WORK(&hdev->error_reset, hci_error_reset);
3838         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3839 
3840         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3841 
3842         skb_queue_head_init(&hdev->rx_q);
3843         skb_queue_head_init(&hdev->cmd_q);
3844         skb_queue_head_init(&hdev->raw_q);
3845 
3846         init_waitqueue_head(&hdev->req_wait_q);
3847         init_waitqueue_head(&hdev->suspend_wait_q);
3848 
3849         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3850 
3851         hci_request_setup(hdev);
3852 
3853         hci_init_sysfs(hdev);
3854         discovery_init(hdev);
3855 
3856         return hdev;
3857 }
3858 EXPORT_SYMBOL(hci_alloc_dev);
3859 
3860 /* Free HCI device */
3861 void hci_free_dev(struct hci_dev *hdev)
3862 {
3863         /* will free via device release */
3864         put_device(&hdev->dev);
3865 }
3866 EXPORT_SYMBOL(hci_free_dev);
3867 
3868 /* Register HCI device */
3869 int hci_register_dev(struct hci_dev *hdev)
3870 {
3871         int id, error;
3872 
3873         if (!hdev->open || !hdev->close || !hdev->send)
3874                 return -EINVAL;
3875 
3876         /* Do not allow HCI_AMP devices to register at index 0,
3877          * so the index can be used as the AMP controller ID.
3878          */
3879         switch (hdev->dev_type) {
3880         case HCI_PRIMARY:
3881                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3882                 break;
3883         case HCI_AMP:
3884                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3885                 break;
3886         default:
3887                 return -EINVAL;
3888         }
3889 
3890         if (id < 0)
3891                 return id;
3892 
3893         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3894         hdev->id = id;
3895 
3896         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3897 
3898         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3899         if (!hdev->workqueue) {
3900                 error = -ENOMEM;
3901                 goto err;
3902         }
3903 
3904         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3905                                                       hdev->name);
3906         if (!hdev->req_workqueue) {
3907                 destroy_workqueue(hdev->workqueue);
3908                 error = -ENOMEM;
3909                 goto err;
3910         }
3911 
3912         if (!IS_ERR_OR_NULL(bt_debugfs))
3913                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3914 
3915         dev_set_name(&hdev->dev, "%s", hdev->name);
3916 
3917         error = device_add(&hdev->dev);
3918         if (error < 0)
3919                 goto err_wqueue;
3920 
3921         hci_leds_init(hdev);
3922 
3923         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3924                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3925                                     hdev);
3926         if (hdev->rfkill) {
3927                 if (rfkill_register(hdev->rfkill) < 0) {
3928                         rfkill_destroy(hdev->rfkill);
3929                         hdev->rfkill = NULL;
3930                 }
3931         }
3932 
3933         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3934                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3935 
3936         hci_dev_set_flag(hdev, HCI_SETUP);
3937         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3938 
3939         if (hdev->dev_type == HCI_PRIMARY) {
3940                 /* Assume BR/EDR support until proven otherwise (such as
3941                  * through reading supported features during init).
3942                  */
3943                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3944         }
3945 
3946         write_lock(&hci_dev_list_lock);
3947         list_add(&hdev->list, &hci_dev_list);
3948         write_unlock(&hci_dev_list_lock);
3949 
3950         /* Devices that are marked for raw-only usage are unconfigured
3951          * and should not be included in normal operation.
3952          */
3953         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3954                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3955 
3956         hci_sock_dev_event(hdev, HCI_DEV_REG);
3957         hci_dev_hold(hdev);
3958 
3959         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3960                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3961                 error = register_pm_notifier(&hdev->suspend_notifier);
3962                 if (error)
3963                         goto err_wqueue;
3964         }
3965 
3966         queue_work(hdev->req_workqueue, &hdev->power_on);
3967 
3968         idr_init(&hdev->adv_monitors_idr);
3969 
3970         return id;
3971 
3972 err_wqueue:
3973         destroy_workqueue(hdev->workqueue);
3974         destroy_workqueue(hdev->req_workqueue);
3975 err:
3976         ida_simple_remove(&hci_index_ida, hdev->id);
3977 
3978         return error;
3979 }
3980 EXPORT_SYMBOL(hci_register_dev);
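
/* Editor's illustrative sketch: the minimal driver-side sequence. The
 * three stub callbacks stand in for a real transport; only the ops that
 * hci_register_dev() checks for are shown, and HCI_USB is a placeholder
 * bus type.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver hands skb to the hardware */
	return 0;
}

static int example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}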
3981 
3982 /* Unregister HCI device */
3983 void hci_unregister_dev(struct hci_dev *hdev)
3984 {
3985         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3986 
3987         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3988 
3989         write_lock(&hci_dev_list_lock);
3990         list_del(&hdev->list);
3991         write_unlock(&hci_dev_list_lock);
3992 
3993         cancel_work_sync(&hdev->power_on);
3994 
3995         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3996                 hci_suspend_clear_tasks(hdev);
3997                 unregister_pm_notifier(&hdev->suspend_notifier);
3998                 cancel_work_sync(&hdev->suspend_prepare);
3999         }
4000 
4001         hci_dev_do_close(hdev);
4002 
4003         if (!test_bit(HCI_INIT, &hdev->flags) &&
4004             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4005             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4006                 hci_dev_lock(hdev);
4007                 mgmt_index_removed(hdev);
4008                 hci_dev_unlock(hdev);
4009         }
4010 
4011         /* mgmt_index_removed should take care of emptying the
4012          * pending list */
4013         BUG_ON(!list_empty(&hdev->mgmt_pending));
4014 
4015         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4016 
4017         if (hdev->rfkill) {
4018                 rfkill_unregister(hdev->rfkill);
4019                 rfkill_destroy(hdev->rfkill);
4020         }
4021 
4022         device_del(&hdev->dev);
4023         /* Actual cleanup is deferred until hci_cleanup_dev(). */
4024         hci_dev_put(hdev);
4025 }
4026 EXPORT_SYMBOL(hci_unregister_dev);
4027 
4028 /* Cleanup HCI device */
4029 void hci_cleanup_dev(struct hci_dev *hdev)
4030 {
4031         debugfs_remove_recursive(hdev->debugfs);
4032         kfree_const(hdev->hw_info);
4033         kfree_const(hdev->fw_info);
4034 
4035         destroy_workqueue(hdev->workqueue);
4036         destroy_workqueue(hdev->req_workqueue);
4037 
4038         hci_dev_lock(hdev);
4039         hci_bdaddr_list_clear(&hdev->blacklist);
4040         hci_bdaddr_list_clear(&hdev->whitelist);
4041         hci_uuids_clear(hdev);
4042         hci_link_keys_clear(hdev);
4043         hci_smp_ltks_clear(hdev);
4044         hci_smp_irks_clear(hdev);
4045         hci_remote_oob_data_clear(hdev);
4046         hci_adv_instances_clear(hdev);
4047         hci_adv_monitors_clear(hdev);
4048         hci_bdaddr_list_clear(&hdev->le_white_list);
4049         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4050         hci_conn_params_clear_all(hdev);
4051         hci_discovery_filter_clear(hdev);
4052         hci_blocked_keys_clear(hdev);
4053         hci_dev_unlock(hdev);
4054 
4055         ida_simple_remove(&hci_index_ida, hdev->id);
4056 }
4057 
4058 /* Suspend HCI device */
4059 int hci_suspend_dev(struct hci_dev *hdev)
4060 {
4061         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4062         return 0;
4063 }
4064 EXPORT_SYMBOL(hci_suspend_dev);
4065 
4066 /* Resume HCI device */
4067 int hci_resume_dev(struct hci_dev *hdev)
4068 {
4069         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4070         return 0;
4071 }
4072 EXPORT_SYMBOL(hci_resume_dev);
4073 
4074 /* Reset HCI device */
4075 int hci_reset_dev(struct hci_dev *hdev)
4076 {
4077         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4078         struct sk_buff *skb;
4079 
4080         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
4081         if (!skb)
4082                 return -ENOMEM;
4083 
4084         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4085         skb_put_data(skb, hw_err, sizeof(hw_err));
4086 
4087         /* Send Hardware Error to upper stack */
4088         return hci_recv_frame(hdev, skb);
4089 }
4090 EXPORT_SYMBOL(hci_reset_dev);
4091 
4092 /* Receive frame from HCI drivers */
4093 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4094 {
4095         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4096                       !test_bit(HCI_INIT, &hdev->flags))) {
4097                 kfree_skb(skb);
4098                 return -ENXIO;
4099         }
4100 
4101         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4102             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4103             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4104             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4105                 kfree_skb(skb);
4106                 return -EINVAL;
4107         }
4108 
4109         /* Incoming skb */
4110         bt_cb(skb)->incoming = 1;
4111 
4112         /* Time stamp */
4113         __net_timestamp(skb);
4114 
4115         skb_queue_tail(&hdev->rx_q, skb);
4116         queue_work(hdev->workqueue, &hdev->rx_work);
4117 
4118         return 0;
4119 }
4120 EXPORT_SYMBOL(hci_recv_frame);
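
/* Editor's illustrative sketch: a driver delivering a received event to
 * the core. The packet-type tag must be one of the four types accepted
 * by hci_recv_frame() above; buf/len come from the transport.
 */
static int example_driver_rx(struct hci_dev *hdev, const void *buf,
			     size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, buf, len);

	return hci_recv_frame(hdev, skb);
}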
4121 
4122 /* Receive diagnostic message from HCI drivers */
4123 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4124 {
4125         /* Mark as diagnostic packet */
4126         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4127 
4128         /* Time stamp */
4129         __net_timestamp(skb);
4130 
4131         skb_queue_tail(&hdev->rx_q, skb);
4132         queue_work(hdev->workqueue, &hdev->rx_work);
4133 
4134         return 0;
4135 }
4136 EXPORT_SYMBOL(hci_recv_diag);
4137 
4138 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4139 {
4140         va_list vargs;
4141 
4142         va_start(vargs, fmt);
4143         kfree_const(hdev->hw_info);
4144         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4145         va_end(vargs);
4146 }
4147 EXPORT_SYMBOL(hci_set_hw_info);
4148 
4149 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4150 {
4151         va_list vargs;
4152 
4153         va_start(vargs, fmt);
4154         kfree_const(hdev->fw_info);
4155         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4156         va_end(vargs);
4157 }
4158 EXPORT_SYMBOL(hci_set_fw_info);
4159 
4160 /* ---- Interface to upper protocols ---- */
4161 
4162 int hci_register_cb(struct hci_cb *cb)
4163 {
4164         BT_DBG("%p name %s", cb, cb->name);
4165 
4166         mutex_lock(&hci_cb_list_lock);
4167         list_add_tail(&cb->list, &hci_cb_list);
4168         mutex_unlock(&hci_cb_list_lock);
4169 
4170         return 0;
4171 }
4172 EXPORT_SYMBOL(hci_register_cb);
4173 
4174 int hci_unregister_cb(struct hci_cb *cb)
4175 {
4176         BT_DBG("%p name %s", cb, cb->name);
4177 
4178         mutex_lock(&hci_cb_list_lock);
4179         list_del(&cb->list);
4180         mutex_unlock(&hci_cb_list_lock);
4181 
4182         return 0;
4183 }
4184 EXPORT_SYMBOL(hci_unregister_cb);
4185 
4186 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4187 {
4188         int err;
4189 
4190         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4191                skb->len);
4192 
4193         /* Time stamp */
4194         __net_timestamp(skb);
4195 
4196         /* Send copy to monitor */
4197         hci_send_to_monitor(hdev, skb);
4198 
4199         if (atomic_read(&hdev->promisc)) {
4200                 /* Send copy to the sockets */
4201                 hci_send_to_sock(hdev, skb);
4202         }
4203 
4204         /* Get rid of skb owner, prior to sending to the driver. */
4205         skb_orphan(skb);
4206 
4207         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4208                 kfree_skb(skb);
4209                 return;
4210         }
4211 
4212         err = hdev->send(hdev, skb);
4213         if (err < 0) {
4214                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4215                 kfree_skb(skb);
4216         }
4217 }
4218 
4219 /* Send HCI command */
4220 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4221                  const void *param)
4222 {
4223         struct sk_buff *skb;
4224 
4225         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4226 
4227         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4228         if (!skb) {
4229                 bt_dev_err(hdev, "no memory for command");
4230                 return -ENOMEM;
4231         }
4232 
4233         /* Stand-alone HCI commands must be flagged as
4234          * single-command requests.
4235          */
4236         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4237 
4238         skb_queue_tail(&hdev->cmd_q, skb);
4239         queue_work(hdev->workqueue, &hdev->cmd_work);
4240 
4241         return 0;
4242 }
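
/* Editor's illustrative sketch: queuing a parameterless HCI Reset. The
 * command completes asynchronously via the event path; a zero return
 * only means the command was queued.
 */
static int example_queue_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}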
4243 
4244 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4245                    const void *param)
4246 {
4247         struct sk_buff *skb;
4248 
4249         if (hci_opcode_ogf(opcode) != 0x3f) {
4250                 /* A controller receiving a command shall respond with either
4251                  * a Command Status Event or a Command Complete Event.
4252                  * Therefore, all standard HCI commands must be sent via the
4253                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4254                  * Some vendors do not comply with this rule for vendor-specific
4255                  * commands and do not return any event. We want to support
4256                  * unresponded commands for such cases only.
4257                  */
4258                 bt_dev_err(hdev, "unresponded command not supported");
4259                 return -EINVAL;
4260         }
4261 
4262         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4263         if (!skb) {
4264                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4265                            opcode);
4266                 return -ENOMEM;
4267         }
4268 
4269         hci_send_frame(hdev, skb);
4270 
4271         return 0;
4272 }
4273 EXPORT_SYMBOL(__hci_cmd_send);
4274 
4275 /* Get data from the previously sent command */
4276 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4277 {
4278         struct hci_command_hdr *hdr;
4279 
4280         if (!hdev->sent_cmd)
4281                 return NULL;
4282 
4283         hdr = (void *) hdev->sent_cmd->data;
4284 
4285         if (hdr->opcode != cpu_to_le16(opcode))
4286                 return NULL;
4287 
4288         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4289 
4290         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4291 }
4292 
4293 /* Send HCI command and wait for command complete event */
4294 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4295                              const void *param, u32 timeout)
4296 {
4297         struct sk_buff *skb;
4298 
4299         if (!test_bit(HCI_UP, &hdev->flags))
4300                 return ERR_PTR(-ENETDOWN);
4301 
4302         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4303 
4304         hci_req_sync_lock(hdev);
4305         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4306         hci_req_sync_unlock(hdev);
4307 
4308         return skb;
4309 }
4310 EXPORT_SYMBOL(hci_cmd_sync);
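
/* Editor's illustrative sketch: the synchronous variant returns the
 * Command Complete parameters as an skb, or an ERR_PTR on failure.
 * Reading the local version with HCI_INIT_TIMEOUT is a placeholder use.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data carries struct hci_rp_read_local_version */
	kfree_skb(skb);

	return 0;
}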
4311 
4312 /* Send ACL data */
4313 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4314 {
4315         struct hci_acl_hdr *hdr;
4316         int len = skb->len;
4317 
4318         skb_push(skb, HCI_ACL_HDR_SIZE);
4319         skb_reset_transport_header(skb);
4320         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4321         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4322         hdr->dlen   = cpu_to_le16(len);
4323 }
4324 
4325 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4326                           struct sk_buff *skb, __u16 flags)
4327 {
4328         struct hci_conn *conn = chan->conn;
4329         struct hci_dev *hdev = conn->hdev;
4330         struct sk_buff *list;
4331 
4332         skb->len = skb_headlen(skb);
4333         skb->data_len = 0;
4334 
4335         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4336 
4337         switch (hdev->dev_type) {
4338         case HCI_PRIMARY:
4339                 hci_add_acl_hdr(skb, conn->handle, flags);
4340                 break;
4341         case HCI_AMP:
4342                 hci_add_acl_hdr(skb, chan->handle, flags);
4343                 break;
4344         default:
4345                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4346                 return;
4347         }
4348 
4349         list = skb_shinfo(skb)->frag_list;
4350         if (!list) {
4351                 /* Non fragmented */
4352                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4353 
4354                 skb_queue_tail(queue, skb);
4355         } else {
4356                 /* Fragmented */
4357                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4358 
4359                 skb_shinfo(skb)->frag_list = NULL;
4360 
4361                 /* Queue all fragments atomically. We need to use spin_lock_bh
4362                  * here because of 6LoWPAN links, as there this function is
4363                  * called from softirq and using normal spin lock could cause
4364                  * deadlocks.
4365                  */
4366                 spin_lock_bh(&queue->lock);
4367 
4368                 __skb_queue_tail(queue, skb);
4369 
4370                 flags &= ~ACL_START;
4371                 flags |= ACL_CONT;
4372                 do {
4373                         skb = list; list = list->next;
4374 
4375                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4376                         hci_add_acl_hdr(skb, conn->handle, flags);
4377 
4378                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4379 
4380                         __skb_queue_tail(queue, skb);
4381                 } while (list);
4382 
4383                 spin_unlock_bh(&queue->lock);
4384         }
4385 }
4386 
4387 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4388 {
4389         struct hci_dev *hdev = chan->conn->hdev;
4390 
4391         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4392 
4393         hci_queue_acl(chan, &chan->data_q, skb, flags);
4394 
4395         queue_work(hdev->workqueue, &hdev->tx_work);
4396 }
4397 
4398 /* Send SCO data */
4399 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4400 {
4401         struct hci_dev *hdev = conn->hdev;
4402         struct hci_sco_hdr hdr;
4403 
4404         BT_DBG("%s len %d", hdev->name, skb->len);
4405 
4406         hdr.handle = cpu_to_le16(conn->handle);
4407         hdr.dlen   = skb->len;
4408 
4409         skb_push(skb, HCI_SCO_HDR_SIZE);
4410         skb_reset_transport_header(skb);
4411         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4412 
4413         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4414 
4415         skb_queue_tail(&conn->data_q, skb);
4416         queue_work(hdev->workqueue, &hdev->tx_work);
4417 }
4418 
4419 /* ---- HCI TX task (outgoing data) ---- */
4420 
4421 /* HCI Connection scheduler */
4422 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4423                                      int *quote)
4424 {
4425         struct hci_conn_hash *h = &hdev->conn_hash;
4426         struct hci_conn *conn = NULL, *c;
4427         unsigned int num = 0, min = ~0;
4428 
4429         /* We don't have to lock device here. Connections are always
4430          * added and removed with TX task disabled. */
4431 
4432         rcu_read_lock();
4433 
4434         list_for_each_entry_rcu(c, &h->list, list) {
4435                 if (c->type != type || skb_queue_empty(&c->data_q))
4436                         continue;
4437 
4438                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4439                         continue;
4440 
4441                 num++;
4442 
4443                 if (c->sent < min) {
4444                         min  = c->sent;
4445                         conn = c;
4446                 }
4447 
4448                 if (hci_conn_num(hdev, type) == num)
4449                         break;
4450         }
4451 
4452         rcu_read_unlock();
4453 
4454         if (conn) {
4455                 int cnt, q;
4456 
4457                 switch (conn->type) {
4458                 case ACL_LINK:
4459                         cnt = hdev->acl_cnt;
4460                         break;
4461                 case SCO_LINK:
4462                 case ESCO_LINK:
4463                         cnt = hdev->sco_cnt;
4464                         break;
4465                 case LE_LINK:
4466                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4467                         break;
4468                 default:
4469                         cnt = 0;
4470                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4471                 }
4472 
4473                 q = cnt / num;
4474                 *quote = q ? q : 1;
4475         } else
4476                 *quote = 0;
4477 
4478         BT_DBG("conn %p quote %d", conn, *quote);
4479         return conn;
4480 }
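
/* Editor's worked example for the quota above: with hdev->acl_cnt == 8
 * free ACL buffers shared by num == 3 busy ACL connections, the
 * least-used connection gets a quote of 8 / 3 == 2 packets per round;
 * when cnt < num the quote floors at 1, so no connection is starved.
 */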
4481 
4482 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4483 {
4484         struct hci_conn_hash *h = &hdev->conn_hash;
4485         struct hci_conn *c;
4486 
4487         bt_dev_err(hdev, "link tx timeout");
4488 
4489         rcu_read_lock();
4490 
4491         /* Kill stalled connections */
4492         list_for_each_entry_rcu(c, &h->list, list) {
4493                 if (c->type == type && c->sent) {
4494                         bt_dev_err(hdev, "killing stalled connection %pMR",
4495                                    &c->dst);
4496                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4497                 }
4498         }
4499 
4500         rcu_read_unlock();
4501 }
4502 
4503 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4504                                       int *quote)
4505 {
4506         struct hci_conn_hash *h = &hdev->conn_hash;
4507         struct hci_chan *chan = NULL;
4508         unsigned int num = 0, min = ~0, cur_prio = 0;
4509         struct hci_conn *conn;
4510         int cnt, q, conn_num = 0;
4511 
4512         BT_DBG("%s", hdev->name);
4513 
4514         rcu_read_lock();
4515 
4516         list_for_each_entry_rcu(conn, &h->list, list) {
4517                 struct hci_chan *tmp;
4518 
4519                 if (conn->type != type)
4520                         continue;
4521 
4522                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4523                         continue;
4524 
4525                 conn_num++;
4526 
4527                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4528                         struct sk_buff *skb;
4529 
4530                         if (skb_queue_empty(&tmp->data_q))
4531                                 continue;
4532 
4533                         skb = skb_peek(&tmp->data_q);
4534                         if (skb->priority < cur_prio)
4535                                 continue;
4536 
4537                         if (skb->priority > cur_prio) {
4538                                 num = 0;
4539                                 min = ~0;
4540                                 cur_prio = skb->priority;
4541                         }
4542 
4543                         num++;
4544 
4545                         if (conn->sent < min) {
4546                                 min  = conn->sent;
4547                                 chan = tmp;
4548                         }
4549                 }
4550 
4551                 if (hci_conn_num(hdev, type) == conn_num)
4552                         break;
4553         }
4554 
4555         rcu_read_unlock();
4556 
4557         if (!chan)
4558                 return NULL;
4559 
4560         switch (chan->conn->type) {
4561         case ACL_LINK:
4562                 cnt = hdev->acl_cnt;
4563                 break;
4564         case AMP_LINK:
4565                 cnt = hdev->block_cnt;
4566                 break;
4567         case SCO_LINK:
4568         case ESCO_LINK:
4569                 cnt = hdev->sco_cnt;
4570                 break;
4571         case LE_LINK:
4572                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4573                 break;
4574         default:
4575                 cnt = 0;
4576                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4577         }
4578 
4579         q = cnt / num;
4580         *quote = q ? q : 1;
4581         BT_DBG("chan %p quote %d", chan, *quote);
4582         return chan;
4583 }
4584 
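     /* Priority aging, run after a scheduling round consumed credits:
      * channels that sent data get their per-round counter (chan->sent)
      * reset, while channels that were skipped but still have queued data
      * get their head skb promoted to HCI_PRIO_MAX - 1 so they cannot be
      * starved indefinitely by higher-priority traffic.
      */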
4585 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4586 {
4587         struct hci_conn_hash *h = &hdev->conn_hash;
4588         struct hci_conn *conn;
4589         int num = 0;
4590 
4591         BT_DBG("%s", hdev->name);
4592 
4593         rcu_read_lock();
4594 
4595         list_for_each_entry_rcu(conn, &h->list, list) {
4596                 struct hci_chan *chan;
4597 
4598                 if (conn->type != type)
4599                         continue;
4600 
4601                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4602                         continue;
4603 
4604                 num++;
4605 
4606                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4607                         struct sk_buff *skb;
4608 
4609                         if (chan->sent) {
4610                                 chan->sent = 0;
4611                                 continue;
4612                         }
4613 
4614                         if (skb_queue_empty(&chan->data_q))
4615                                 continue;
4616 
4617                         skb = skb_peek(&chan->data_q);
4618                         if (skb->priority >= HCI_PRIO_MAX - 1)
4619                                 continue;
4620 
4621                         skb->priority = HCI_PRIO_MAX - 1;
4622 
4623                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4624                                skb->priority);
4625                 }
4626 
4627                 if (hci_conn_num(hdev, type) == num)
4628                         break;
4629         }
4630 
4631         rcu_read_unlock();
4632 
4633 }
4634 
4635 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4636 {
4637         /* Calculate count of blocks used by this packet */
4638         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4639 }
4640 
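     /* Trigger the link TX timeout when the controller has reported no
      * free ACL buffers (cnt == 0) and nothing was sent for longer than
      * HCI_ACL_TX_TIMEOUT; skipped while the controller is unconfigured.
      */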
4641 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4642 {
4643         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4644                 /* ACL tx timeout must be longer than maximum
4645                  * link supervision timeout (40.9 seconds) */
4646                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4647                                        HCI_ACL_TX_TIMEOUT))
4648                         hci_link_tx_to(hdev, ACL_LINK);
4649         }
4650 }
4651 
4652 /* Schedule SCO */
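     /* While free SCO buffers remain, repeatedly pick the SCO connection
      * with the fewest packets in flight (hci_low_sent()) and send up to
      * its fair-share quota from its data queue.
      */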
4653 static void hci_sched_sco(struct hci_dev *hdev)
4654 {
4655         struct hci_conn *conn;
4656         struct sk_buff *skb;
4657         int quote;
4658 
4659         BT_DBG("%s", hdev->name);
4660 
4661         if (!hci_conn_num(hdev, SCO_LINK))
4662                 return;
4663 
4664         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4665                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4666                         BT_DBG("skb %p len %d", skb, skb->len);
4667                         hci_send_frame(hdev, skb);
4668 
4669                         conn->sent++;
4670                         if (conn->sent == ~0)
4671                                 conn->sent = 0;
4672                 }
4673         }
4674 }
4675 
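     /* Schedule eSCO: same algorithm as hci_sched_sco(), but for
      * ESCO_LINK connections; both link types draw on the shared SCO
      * buffer count (hdev->sco_cnt).
      */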
4676 static void hci_sched_esco(struct hci_dev *hdev)
4677 {
4678         struct hci_conn *conn;
4679         struct sk_buff *skb;
4680         int quote;
4681 
4682         BT_DBG("%s", hdev->name);
4683 
4684         if (!hci_conn_num(hdev, ESCO_LINK))
4685                 return;
4686 
4687         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4688                                                      &quote))) {
4689                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4690                         BT_DBG("skb %p len %d", skb, skb->len);
4691                         hci_send_frame(hdev, skb);
4692 
4693                         conn->sent++;
4694                         if (conn->sent == ~0)
4695                                 conn->sent = 0;
4696                 }
4697         }
4698 }
4699 
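     /* Packet-based ACL scheduling: while ACL buffer credits remain, let
      * hci_chan_sent() pick a channel and drain up to its quota, stopping
      * early if the head-of-queue priority drops. The SCO/eSCO queues are
      * serviced after every frame so that isochronous traffic is not
      * delayed behind bulk ACL data.
      */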
4700 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4701 {
4702         unsigned int cnt = hdev->acl_cnt;
4703         struct hci_chan *chan;
4704         struct sk_buff *skb;
4705         int quote;
4706 
4707         __check_timeout(hdev, cnt);
4708 
4709         while (hdev->acl_cnt &&
4710                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4711                 u32 priority = (skb_peek(&chan->data_q))->priority;
4712                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4713                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4714                                skb->len, skb->priority);
4715 
4716                         /* Stop if priority has changed */
4717                         if (skb->priority < priority)
4718                                 break;
4719 
4720                         skb = skb_dequeue(&chan->data_q);
4721 
4722                         hci_conn_enter_active_mode(chan->conn,
4723                                                    bt_cb(skb)->force_active);
4724 
4725                         hci_send_frame(hdev, skb);
4726                         hdev->acl_last_tx = jiffies;
4727 
4728                         hdev->acl_cnt--;
4729                         chan->sent++;
4730                         chan->conn->sent++;
4731 
4732                         /* Send pending SCO packets right away */
4733                         hci_sched_sco(hdev);
4734                         hci_sched_esco(hdev);
4735                 }
4736         }
4737 
4738         if (cnt != hdev->acl_cnt)
4739                 hci_prio_recalculate(hdev, ACL_LINK);
4740 }
4741 
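     /* Block-based ACL scheduling, for controllers using data block flow
      * control (e.g. AMP): same structure as hci_sched_acl_pkt(), except
      * that credits are counted in data blocks, so each frame consumes
      * __get_blocks() credits from both hdev->block_cnt and the quota.
      */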
4742 static void hci_sched_acl_blk(struct hci_dev *hdev)
4743 {
4744         unsigned int cnt = hdev->block_cnt;
4745         struct hci_chan *chan;
4746         struct sk_buff *skb;
4747         int quote;
4748         u8 type;
4749 
4750         __check_timeout(hdev, cnt);
4751 
4752         BT_DBG("%s", hdev->name);
4753 
4754         if (hdev->dev_type == HCI_AMP)
4755                 type = AMP_LINK;
4756         else
4757                 type = ACL_LINK;
4758 
4759         while (hdev->block_cnt > 0 &&
4760                (chan = hci_chan_sent(hdev, type, &quote))) {
4761                 u32 priority = (skb_peek(&chan->data_q))->priority;
4762                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4763                         int blocks;
4764 
4765                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4766                                skb->len, skb->priority);
4767 
4768                         /* Stop if priority has changed */
4769                         if (skb->priority < priority)
4770                                 break;
4771 
4772                         skb = skb_dequeue(&chan->data_q);
4773 
4774                         blocks = __get_blocks(hdev, skb);
4775                         if (blocks > hdev->block_cnt)
4776                                 return;
4777 
4778                         hci_conn_enter_active_mode(chan->conn,
4779                                                    bt_cb(skb)->force_active);
4780 
4781                         hci_send_frame(hdev, skb);
4782                         hdev->acl_last_tx = jiffies;
4783 
4784                         hdev->block_cnt -= blocks;
4785                         quote -= blocks;
4786 
4787                         chan->sent += blocks;
4788                         chan->conn->sent += blocks;
4789                 }
4790         }
4791 
4792         if (cnt != hdev->block_cnt)
4793                 hci_prio_recalculate(hdev, type);
4794 }
4795 
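     /* Dispatch to packet-based or block-based ACL scheduling according
      * to the controller's flow control mode.
      */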
4796 static void hci_sched_acl(struct hci_dev *hdev)
4797 {
4798         BT_DBG("%s", hdev->name);
4799 
4800         /* Nothing to do when a BR/EDR controller has no ACL links */
4801         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4802                 return;
4803 
4804         /* Nothing to do when an AMP controller has no AMP links */
4805         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4806                 return;
4807 
4808         switch (hdev->flow_ctl_mode) {
4809         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4810                 hci_sched_acl_pkt(hdev);
4811                 break;
4812 
4813         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4814                 hci_sched_acl_blk(hdev);
4815                 break;
4816         }
4817 }
4818 
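     /* Schedule LE: LE traffic uses the dedicated LE buffer pool when the
      * controller advertises one (le_pkts), and otherwise shares the ACL
      * pool. The remaining credit count is written back to the matching
      * counter once the queues have been drained.
      */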
4819 static void hci_sched_le(struct hci_dev *hdev)
4820 {
4821         struct hci_chan *chan;
4822         struct sk_buff *skb;
4823         int quote, cnt, tmp;
4824 
4825         BT_DBG("%s", hdev->name);
4826 
4827         if (!hci_conn_num(hdev, LE_LINK))
4828                 return;
4829 
4830         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4831 
4832         __check_timeout(hdev, cnt);
4833 
4834         tmp = cnt;
4835         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4836                 u32 priority = (skb_peek(&chan->data_q))->priority;
4837                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4838                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4839                                skb->len, skb->priority);
4840 
4841                         /* Stop if priority has changed */
4842                         if (skb->priority < priority)
4843                                 break;
4844 
4845                         skb = skb_dequeue(&chan->data_q);
4846 
4847                         hci_send_frame(hdev, skb);
4848                         hdev->le_last_tx = jiffies;
4849 
4850                         cnt--;
4851                         chan->sent++;
4852                         chan->conn->sent++;
4853 
4854                         /* Send pending SCO packets right away */
4855                         hci_sched_sco(hdev);
4856                         hci_sched_esco(hdev);
4857                 }
4858         }
4859 
4860         if (hdev->le_pkts)
4861                 hdev->le_cnt = cnt;
4862         else
4863                 hdev->acl_cnt = cnt;
4864 
4865         if (cnt != tmp)
4866                 hci_prio_recalculate(hdev, LE_LINK);
4867 }
4868 
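     /* TX work: entry point of the transmit path, run from the hdev
      * workqueue. Services the SCO, eSCO, ACL and LE schedulers unless
      * userspace has exclusive access (HCI_USER_CHANNEL), and always
      * flushes raw-typed packets queued on raw_q.
      */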
4869 static void hci_tx_work(struct work_struct *work)
4870 {
4871         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4872         struct sk_buff *skb;
4873 
4874         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4875                hdev->sco_cnt, hdev->le_cnt);
4876 
4877         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4878                 /* Schedule queues and send stuff to HCI driver */
4879                 hci_sched_sco(hdev);
4880                 hci_sched_esco(hdev);
4881                 hci_sched_acl(hdev);
4882                 hci_sched_le(hdev);
4883         }
4884 
4885         /* Flush any queued raw (unknown type) packets */
4886         while ((skb = skb_dequeue(&hdev->raw_q)))
4887                 hci_send_frame(hdev, skb);
4888 }
4889 
4890 /* ----- HCI RX task (incoming data processing) ----- */
4891 
4892 /* ACL data packet */
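     /* Look up the connection that owns the handle carried in the ACL
      * header and pass the payload up to L2CAP; packets for unknown
      * handles are logged and dropped.
      */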
4893 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4894 {
4895         struct hci_acl_hdr *hdr = (void *) skb->data;
4896         struct hci_conn *conn;
4897         __u16 handle, flags;
4898 
4899         skb_pull(skb, HCI_ACL_HDR_SIZE);
4900 
4901         handle = __le16_to_cpu(hdr->handle);
4902         flags  = hci_flags(handle);
4903         handle = hci_handle(handle);
4904 
4905         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4906                handle, flags);
4907 
4908         hdev->stat.acl_rx++;
4909 
4910         hci_dev_lock(hdev);
4911         conn = hci_conn_hash_lookup_handle(hdev, handle);
4912         hci_dev_unlock(hdev);
4913 
4914         if (conn) {
4915                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4916 
4917                 /* Send to upper protocol */
4918                 l2cap_recv_acldata(conn, skb, flags);
4919                 return;
4920         } else {
4921                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4922                            handle);
4923         }
4924 
4925         kfree_skb(skb);
4926 }
4927 
4928 /* SCO data packet */
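     /* Look up the connection that owns the handle carried in the SCO
      * header, record the packet status flag and pass the payload up to
      * the SCO layer; packets for unknown handles are logged and dropped.
      */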
4929 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4930 {
4931         struct hci_sco_hdr *hdr = (void *) skb->data;
4932         struct hci_conn *conn;
4933         __u16 handle, flags;
4934 
4935         skb_pull(skb, HCI_SCO_HDR_SIZE);
4936 
4937         handle = __le16_to_cpu(hdr->handle);
4938         flags  = hci_flags(handle);
4939         handle = hci_handle(handle);
4940 
4941         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4942                handle, flags);
4943 
4944         hdev->stat.sco_rx++;
4945 
4946         hci_dev_lock(hdev);
4947         conn = hci_conn_hash_lookup_handle(hdev, handle);
4948         hci_dev_unlock(hdev);
4949 
4950         if (conn) {
4951                 /* Send to upper protocol */
4952                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4953                 sco_recv_scodata(conn, skb);
4954                 return;
4955         } else {
4956                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4957                            handle);
4958         }
4959 
4960         kfree_skb(skb);
4961 }
4962 
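     /* A request is complete when the command queue is empty or when the
      * next queued command is flagged as the start of a new request
      * (HCI_REQ_START).
      */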
4963 static bool hci_req_is_complete(struct hci_dev *hdev)
4964 {
4965         struct sk_buff *skb;
4966 
4967         skb = skb_peek(&hdev->cmd_q);
4968         if (!skb)
4969                 return true;
4970 
4971         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4972 }
4973 
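     /* Re-queue a clone of the last sent command (unless it was
      * HCI_OP_RESET) and kick the command work so it is sent again.
      */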
4974 static void hci_resend_last(struct hci_dev *hdev)
4975 {
4976         struct hci_command_hdr *sent;
4977         struct sk_buff *skb;
4978         u16 opcode;
4979 
4980         if (!hdev->sent_cmd)
4981                 return;
4982 
4983         sent = (void *) hdev->sent_cmd->data;
4984         opcode = __le16_to_cpu(sent->opcode);
4985         if (opcode == HCI_OP_RESET)
4986                 return;
4987 
4988         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4989         if (!skb)
4990                 return;
4991 
4992         skb_queue_head(&hdev->cmd_q, skb);
4993         queue_work(hdev->workqueue, &hdev->cmd_work);
4994 }
4995 
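     /* Called from command status/complete event handling: determine
      * whether the request containing @opcode has now finished and, if
      * so, return its completion callback through @req_complete or
      * @req_complete_skb. On a command failure the remaining queued
      * commands of the same request are discarded, since a request is
      * aborted on its first error.
      */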
4996 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4997                           hci_req_complete_t *req_complete,
4998                           hci_req_complete_skb_t *req_complete_skb)
4999 {
5000         struct sk_buff *skb;
5001         unsigned long flags;
5002 
5003         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5004 
5005         /* If the completed command doesn't match the last one that was
5006          * sent, we need to do special handling of it.
5007          */
5008         if (!hci_sent_cmd_data(hdev, opcode)) {
5009                 /* Some CSR-based controllers generate a spontaneous
5010                  * reset complete event during init, and any pending
5011                  * command will then never be completed. In such a
5012                  * case we need to resend the last command that was
5013                  * sent.
5014                  */
5015                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5016                         hci_resend_last(hdev);
5017 
5018                 return;
5019         }
5020 
5021         /* If we reach this point, this event matches the last command sent */
5022         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5023 
5024         /* If the command succeeded and there are still more commands in
5025          * this request, the request is not yet complete.
5026          */
5027         if (!status && !hci_req_is_complete(hdev))
5028                 return;
5029 
5030         /* If this was the last command in a request, the complete
5031          * callback is found in hdev->sent_cmd instead of the
5032          * command queue (hdev->cmd_q).
5033          */
5034         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5035                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5036                 return;
5037         }
5038 
5039         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5040                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5041                 return;
5042         }
5043 
5044         /* Remove all pending commands belonging to this request */
5045         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5046         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5047                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5048                         __skb_queue_head(&hdev->cmd_q, skb);
5049                         break;
5050                 }
5051 
5052                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5053                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5054                 else
5055                         *req_complete = bt_cb(skb)->hci.req_complete;
5056                 kfree_skb(skb);
5057         }
5058         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5059 }
5060 
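     /* RX work: drains the receive queue, mirroring every packet to the
      * monitor socket (and to raw sockets in promiscuous mode) before
      * dispatching it to the event, ACL data or SCO data handler.
      */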
5061 static void hci_rx_work(struct work_struct *work)
5062 {
5063         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5064         struct sk_buff *skb;
5065 
5066         BT_DBG("%s", hdev->name);
5067 
5068         while ((skb = skb_dequeue(&hdev->rx_q))) {
5069                 /* Send copy to monitor */
5070                 hci_send_to_monitor(hdev, skb);
5071 
5072                 if (atomic_read(&hdev->promisc)) {
5073                         /* Send copy to the sockets */
5074                         hci_send_to_sock(hdev, skb);
5075                 }
5076 
5077                 /* If the device has been opened in HCI_USER_CHANNEL,
5078                  * userspace has exclusive access to the device.
5079                  * When the device is in HCI_INIT state, we still need
5080                  * to process the data packets for the driver in order
5081                  * to complete its setup().
5082                  */
5083                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5084                     !test_bit(HCI_INIT, &hdev->flags)) {
5085                         kfree_skb(skb);
5086                         continue;
5087                 }
5088 
5089                 if (test_bit(HCI_INIT, &hdev->flags)) {
5090                         /* Don't process data packets in this state. */
5091                         switch (hci_skb_pkt_type(skb)) {
5092                         case HCI_ACLDATA_PKT:
5093                         case HCI_SCODATA_PKT:
5094                         case HCI_ISODATA_PKT:
5095                                 kfree_skb(skb);
5096                                 continue;
5097                         }
5098                 }
5099 
5100                 /* Process frame */
5101                 switch (hci_skb_pkt_type(skb)) {
5102                 case HCI_EVENT_PKT:
5103                         BT_DBG("%s Event packet", hdev->name);
5104                         hci_event_packet(hdev, skb);
5105                         break;
5106 
5107                 case HCI_ACLDATA_PKT:
5108                         BT_DBG("%s ACL data packet", hdev->name);
5109                         hci_acldata_packet(hdev, skb);
5110                         break;
5111 
5112                 case HCI_SCODATA_PKT:
5113                         BT_DBG("%s SCO data packet", hdev->name);
5114                         hci_scodata_packet(hdev, skb);
5115                         break;
5116 
5117                 default:
5118                         kfree_skb(skb);
5119                         break;
5120                 }
5121         }
5122 }
5123 
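     /* CMD work: when the controller reports a free command slot
      * (cmd_cnt), send the next queued command, keeping a clone in
      * hdev->sent_cmd for request tracking, and arm the command timeout
      * (suppressed while an HCI_RESET is in progress). If cloning fails,
      * the command is re-queued and the work rescheduled.
      */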
5124 static void hci_cmd_work(struct work_struct *work)
5125 {
5126         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5127         struct sk_buff *skb;
5128 
5129         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5130                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5131 
5132         /* Send queued commands */
5133         if (atomic_read(&hdev->cmd_cnt)) {
5134                 skb = skb_dequeue(&hdev->cmd_q);
5135                 if (!skb)
5136                         return;
5137 
5138                 kfree_skb(hdev->sent_cmd);
5139 
5140                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5141                 if (hdev->sent_cmd) {
5142                         if (hci_req_status_pend(hdev))
5143                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5144                         atomic_dec(&hdev->cmd_cnt);
5145                         hci_send_frame(hdev, skb);
5146                         if (test_bit(HCI_RESET, &hdev->flags))
5147                                 cancel_delayed_work(&hdev->cmd_timer);
5148                         else
5149                                 schedule_delayed_work(&hdev->cmd_timer,
5150                                                       HCI_CMD_TIMEOUT);
5151                 } else {
5152                         skb_queue_head(&hdev->cmd_q, skb);
5153                         queue_work(hdev->workqueue, &hdev->cmd_work);
5154                 }
5155         }
5156 }
5157 
