~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

Version: ~ [ linux-6.2-rc3 ] ~ [ linux-6.1.5 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.87 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.162 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.228 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.269 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.302 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.302 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2    BlueZ - Bluetooth protocol stack for Linux
  3    Copyright (C) 2000-2001 Qualcomm Incorporated
  4    Copyright (C) 2011 ProFUSION Embedded Systems
  5 
  6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
  7 
  8    This program is free software; you can redistribute it and/or modify
  9    it under the terms of the GNU General Public License version 2 as
 10    published by the Free Software Foundation;
 11 
 12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
 16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
 17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 20 
 21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
 22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
 23    SOFTWARE IS DISCLAIMED.
 24 */
 25 
 26 /* Bluetooth HCI core. */
 27 
 28 #include <linux/export.h>
 29 #include <linux/rfkill.h>
 30 #include <linux/debugfs.h>
 31 #include <linux/crypto.h>
 32 #include <linux/property.h>
 33 #include <linux/suspend.h>
 34 #include <linux/wait.h>
 35 #include <asm/unaligned.h>
 36 
 37 #include <net/bluetooth/bluetooth.h>
 38 #include <net/bluetooth/hci_core.h>
 39 #include <net/bluetooth/l2cap.h>
 40 #include <net/bluetooth/mgmt.h>
 41 
 42 #include "hci_request.h"
 43 #include "hci_debugfs.h"
 44 #include "smp.h"
 45 #include "leds.h"
 46 #include "msft.h"
 47 
/* Worker prototypes; the bodies are defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
 63 /* ---- HCI debugfs entries ---- */
 64 
 65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
 66                              size_t count, loff_t *ppos)
 67 {
 68         struct hci_dev *hdev = file->private_data;
 69         char buf[3];
 70 
 71         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
 72         buf[1] = '\n';
 73         buf[2] = '\0';
 74         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 75 }
 76 
 77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 78                               size_t count, loff_t *ppos)
 79 {
 80         struct hci_dev *hdev = file->private_data;
 81         struct sk_buff *skb;
 82         bool enable;
 83         int err;
 84 
 85         if (!test_bit(HCI_UP, &hdev->flags))
 86                 return -ENETDOWN;
 87 
 88         err = kstrtobool_from_user(user_buf, count, &enable);
 89         if (err)
 90                 return err;
 91 
 92         if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
 93                 return -EALREADY;
 94 
 95         hci_req_sync_lock(hdev);
 96         if (enable)
 97                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
 98                                      HCI_CMD_TIMEOUT);
 99         else
100                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101                                      HCI_CMD_TIMEOUT);
102         hci_req_sync_unlock(hdev);
103 
104         if (IS_ERR(skb))
105                 return PTR_ERR(skb);
106 
107         kfree_skb(skb);
108 
109         hci_dev_change_flag(hdev, HCI_DUT_MODE);
110 
111         return count;
112 }
113 
/* debugfs file operations for the "dut_mode" entry */
static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
120 
121 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122                                 size_t count, loff_t *ppos)
123 {
124         struct hci_dev *hdev = file->private_data;
125         char buf[3];
126 
127         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
128         buf[1] = '\n';
129         buf[2] = '\0';
130         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131 }
132 
133 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134                                  size_t count, loff_t *ppos)
135 {
136         struct hci_dev *hdev = file->private_data;
137         bool enable;
138         int err;
139 
140         err = kstrtobool_from_user(user_buf, count, &enable);
141         if (err)
142                 return err;
143 
144         /* When the diagnostic flags are not persistent and the transport
145          * is not active or in user channel operation, then there is no need
146          * for the vendor callback. Instead just store the desired value and
147          * the setting will be programmed when the controller gets powered on.
148          */
149         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
150             (!test_bit(HCI_RUNNING, &hdev->flags) ||
151              hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
152                 goto done;
153 
154         hci_req_sync_lock(hdev);
155         err = hdev->set_diag(hdev, enable);
156         hci_req_sync_unlock(hdev);
157 
158         if (err < 0)
159                 return err;
160 
161 done:
162         if (enable)
163                 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
164         else
165                 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
166 
167         return count;
168 }
169 
/* debugfs file operations for the "vendor_diag" entry */
static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};
176 
177 static void hci_debugfs_create_basic(struct hci_dev *hdev)
178 {
179         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
180                             &dut_mode_fops);
181 
182         if (hdev->set_diag)
183                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
184                                     &vendor_diag_fops);
185 }
186 
187 static int hci_reset_req(struct hci_request *req, unsigned long opt)
188 {
189         BT_DBG("%s %ld", req->hdev->name, opt);
190 
191         /* Reset device */
192         set_bit(HCI_RESET, &req->hdev->flags);
193         hci_req_add(req, HCI_OP_RESET, 0, NULL);
194         return 0;
195 }
196 
197 static void bredr_init(struct hci_request *req)
198 {
199         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200 
201         /* Read Local Supported Features */
202         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
203 
204         /* Read Local Version */
205         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
206 
207         /* Read BD Address */
208         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
209 }
210 
211 static void amp_init1(struct hci_request *req)
212 {
213         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
214 
215         /* Read Local Version */
216         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
217 
218         /* Read Local Supported Commands */
219         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
220 
221         /* Read Local AMP Info */
222         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
223 
224         /* Read Data Blk size */
225         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
226 
227         /* Read Flow Control Mode */
228         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
229 
230         /* Read Location Data */
231         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
232 }
233 
234 static int amp_init2(struct hci_request *req)
235 {
236         /* Read Local Supported Features. Not all AMP controllers
237          * support this so it's placed conditionally in the second
238          * stage init.
239          */
240         if (req->hdev->commands[14] & 0x20)
241                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
242 
243         return 0;
244 }
245 
246 static int hci_init1_req(struct hci_request *req, unsigned long opt)
247 {
248         struct hci_dev *hdev = req->hdev;
249 
250         BT_DBG("%s %ld", hdev->name, opt);
251 
252         /* Reset */
253         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
254                 hci_reset_req(req, 0);
255 
256         switch (hdev->dev_type) {
257         case HCI_PRIMARY:
258                 bredr_init(req);
259                 break;
260         case HCI_AMP:
261                 amp_init1(req);
262                 break;
263         default:
264                 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
265                 break;
266         }
267 
268         return 0;
269 }
270 
271 static void bredr_setup(struct hci_request *req)
272 {
273         __le16 param;
274         __u8 flt_type;
275 
276         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
277         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
278 
279         /* Read Class of Device */
280         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
281 
282         /* Read Local Name */
283         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
284 
285         /* Read Voice Setting */
286         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
287 
288         /* Read Number of Supported IAC */
289         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
290 
291         /* Read Current IAC LAP */
292         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
293 
294         /* Clear Event Filters */
295         flt_type = HCI_FLT_CLEAR_ALL;
296         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
297 
298         /* Connection accept timeout ~20 secs */
299         param = cpu_to_le16(0x7d00);
300         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
301 }
302 
303 static void le_setup(struct hci_request *req)
304 {
305         struct hci_dev *hdev = req->hdev;
306 
307         /* Read LE Buffer Size */
308         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
309 
310         /* Read LE Local Supported Features */
311         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
312 
313         /* Read LE Supported States */
314         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
315 
316         /* LE-only controllers have LE implicitly enabled */
317         if (!lmp_bredr_capable(hdev))
318                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
319 }
320 
/* Build and queue the Set Event Mask command based on the controller's
 * advertised capabilities. Starts from a BR/EDR or LE-only baseline and
 * turns on exactly the events the controller can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles does not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
416 
/* Stage two controller init: per-transport setup, event mask and
 * capability dependent configuration commands. The command order is
 * significant and must be preserved.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* AMP controllers have their own, much shorter second stage */
        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        /* SSP disabled: clear any stale EIR data both in
                         * the controller and in the local copy.
                         */
                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                /* Read the extended features page 1 */
                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}
500 
501 static void hci_setup_link_policy(struct hci_request *req)
502 {
503         struct hci_dev *hdev = req->hdev;
504         struct hci_cp_write_def_link_policy cp;
505         u16 link_policy = 0;
506 
507         if (lmp_rswitch_capable(hdev))
508                 link_policy |= HCI_LP_RSWITCH;
509         if (lmp_hold_capable(hdev))
510                 link_policy |= HCI_LP_HOLD;
511         if (lmp_sniff_capable(hdev))
512                 link_policy |= HCI_LP_SNIFF;
513         if (lmp_park_capable(hdev))
514                 link_policy |= HCI_LP_PARK;
515 
516         cp.policy = cpu_to_le16(link_policy);
517         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
518 }
519 
520 static void hci_set_le_support(struct hci_request *req)
521 {
522         struct hci_dev *hdev = req->hdev;
523         struct hci_cp_write_le_host_supported cp;
524 
525         /* LE-only devices do not support explicit enablement */
526         if (!lmp_bredr_capable(hdev))
527                 return;
528 
529         memset(&cp, 0, sizeof(cp));
530 
531         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
532                 cp.le = 0x01;
533                 cp.simul = 0x00;
534         }
535 
536         if (cp.le != lmp_host_le_capable(hdev))
537                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538                             &cp);
539 }
540 
541 static void hci_set_event_mask_page_2(struct hci_request *req)
542 {
543         struct hci_dev *hdev = req->hdev;
544         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545         bool changed = false;
546 
547         /* If Connectionless Slave Broadcast master role is supported
548          * enable all necessary events for it.
549          */
550         if (lmp_csb_master_capable(hdev)) {
551                 events[1] |= 0x40;      /* Triggered Clock Capture */
552                 events[1] |= 0x80;      /* Synchronization Train Complete */
553                 events[2] |= 0x10;      /* Slave Page Response Timeout */
554                 events[2] |= 0x20;      /* CSB Channel Map Change */
555                 changed = true;
556         }
557 
558         /* If Connectionless Slave Broadcast slave role is supported
559          * enable all necessary events for it.
560          */
561         if (lmp_csb_slave_capable(hdev)) {
562                 events[2] |= 0x01;      /* Synchronization Train Received */
563                 events[2] |= 0x02;      /* CSB Receive */
564                 events[2] |= 0x04;      /* CSB Timeout */
565                 events[2] |= 0x08;      /* Truncated Page Complete */
566                 changed = true;
567         }
568 
569         /* Enable Authenticated Payload Timeout Expired event if supported */
570         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
571                 events[2] |= 0x80;
572                 changed = true;
573         }
574 
575         /* Some Broadcom based controllers indicate support for Set Event
576          * Mask Page 2 command, but then actually do not support it. Since
577          * the default value is all bits set to zero, the command is only
578          * required if the event mask has to be changed. In case no change
579          * to the event mask is needed, skip this command.
580          */
581         if (changed)
582                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
583                             sizeof(events), events);
584 }
585 
586 static int hci_init3_req(struct hci_request *req, unsigned long opt)
587 {
588         struct hci_dev *hdev = req->hdev;
589         u8 p;
590 
591         hci_setup_event_mask(req);
592 
593         if (hdev->commands[6] & 0x20 &&
594             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
595                 struct hci_cp_read_stored_link_key cp;
596 
597                 bacpy(&cp.bdaddr, BDADDR_ANY);
598                 cp.read_all = 0x01;
599                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
600         }
601 
602         if (hdev->commands[5] & 0x10)
603                 hci_setup_link_policy(req);
604 
605         if (hdev->commands[8] & 0x01)
606                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
607 
608         if (hdev->commands[18] & 0x04 &&
609             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
610                 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
611 
612         /* Some older Broadcom based Bluetooth 1.2 controllers do not
613          * support the Read Page Scan Type command. Check support for
614          * this command in the bit mask of supported commands.
615          */
616         if (hdev->commands[13] & 0x01)
617                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
618 
619         if (lmp_le_capable(hdev)) {
620                 u8 events[8];
621 
622                 memset(events, 0, sizeof(events));
623 
624                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625                         events[0] |= 0x10;      /* LE Long Term Key Request */
626 
627                 /* If controller supports the Connection Parameters Request
628                  * Link Layer Procedure, enable the corresponding event.
629                  */
630                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631                         events[0] |= 0x20;      /* LE Remote Connection
632                                                  * Parameter Request
633                                                  */
634 
635                 /* If the controller supports the Data Length Extension
636                  * feature, enable the corresponding event.
637                  */
638                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639                         events[0] |= 0x40;      /* LE Data Length Change */
640 
641                 /* If the controller supports LL Privacy feature, enable
642                  * the corresponding event.
643                  */
644                 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
645                         events[1] |= 0x02;      /* LE Enhanced Connection
646                                                  * Complete
647                                                  */
648 
649                 /* If the controller supports Extended Scanner Filter
650                  * Policies, enable the correspondig event.
651                  */
652                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
653                         events[1] |= 0x04;      /* LE Direct Advertising
654                                                  * Report
655                                                  */
656 
657                 /* If the controller supports Channel Selection Algorithm #2
658                  * feature, enable the corresponding event.
659                  */
660                 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
661                         events[2] |= 0x08;      /* LE Channel Selection
662                                                  * Algorithm
663                                                  */
664 
665                 /* If the controller supports the LE Set Scan Enable command,
666                  * enable the corresponding advertising report event.
667                  */
668                 if (hdev->commands[26] & 0x08)
669                         events[0] |= 0x02;      /* LE Advertising Report */
670 
671                 /* If the controller supports the LE Create Connection
672                  * command, enable the corresponding event.
673                  */
674                 if (hdev->commands[26] & 0x10)
675                         events[0] |= 0x01;      /* LE Connection Complete */
676 
677                 /* If the controller supports the LE Connection Update
678                  * command, enable the corresponding event.
679                  */
680                 if (hdev->commands[27] & 0x04)
681                         events[0] |= 0x04;      /* LE Connection Update
682                                                  * Complete
683                                                  */
684 
685                 /* If the controller supports the LE Read Remote Used Features
686                  * command, enable the corresponding event.
687                  */
688                 if (hdev->commands[27] & 0x20)
689                         events[0] |= 0x08;      /* LE Read Remote Used
690                                                  * Features Complete
691                                                  */
692 
693                 /* If the controller supports the LE Read Local P-256
694                  * Public Key command, enable the corresponding event.
695                  */
696                 if (hdev->commands[34] & 0x02)
697                         events[0] |= 0x80;      /* LE Read Local P-256
698                                                  * Public Key Complete
699                                                  */
700 
701                 /* If the controller supports the LE Generate DHKey
702                  * command, enable the corresponding event.
703                  */
704                 if (hdev->commands[34] & 0x04)
705                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
706 
707                 /* If the controller supports the LE Set Default PHY or
708                  * LE Set PHY commands, enable the corresponding event.
709                  */
710                 if (hdev->commands[35] & (0x20 | 0x40))
711                         events[1] |= 0x08;        /* LE PHY Update Complete */
712 
713                 /* If the controller supports LE Set Extended Scan Parameters
714                  * and LE Set Extended Scan Enable commands, enable the
715                  * corresponding event.
716                  */
717                 if (use_ext_scan(hdev))
718                         events[1] |= 0x10;      /* LE Extended Advertising
719                                                  * Report
720                                                  */
721 
722                 /* If the controller supports the LE Extended Advertising
723                  * command, enable the corresponding event.
724                  */
725                 if (ext_adv_capable(hdev))
726                         events[2] |= 0x02;      /* LE Advertising Set
727                                                  * Terminated
728                                                  */
729 
730                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
731                             events);
732 
733                 /* Read LE Advertising Channel TX Power */
734                 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735                         /* HCI TS spec forbids mixing of legacy and extended
736                          * advertising commands wherein READ_ADV_TX_POWER is
737                          * also included. So do not call it if extended adv
738                          * is supported otherwise controller will return
739                          * COMMAND_DISALLOWED for extended commands.
740                          */
741                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
742                 }
743 
744                 if (hdev->commands[38] & 0x80) {
745                         /* Read LE Min/Max Tx Power*/
746                         hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
747                                     0, NULL);
748                 }
749 
750                 if (hdev->commands[26] & 0x40) {
751                         /* Read LE White List Size */
752                         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
753                                     0, NULL);
754                 }
755 
756                 if (hdev->commands[26] & 0x80) {
757                         /* Clear LE White List */
758                         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
759                 }
760 
761                 if (hdev->commands[34] & 0x40) {
762                         /* Read LE Resolving List Size */
763                         hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
764                                     0, NULL);
765                 }
766 
767                 if (hdev->commands[34] & 0x20) {
768                         /* Clear LE Resolving List */
769                         hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
770                 }
771 
772                 if (hdev->commands[35] & 0x04) {
773                         __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
774 
775                         /* Set RPA timeout */
776                         hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
777                                     &rpa_timeout);
778                 }
779 
780                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
781                         /* Read LE Maximum Data Length */
782                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
783 
784                         /* Read LE Suggested Default Data Length */
785                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
786                 }
787 
788                 if (ext_adv_capable(hdev)) {
789                         /* Read LE Number of Supported Advertising Sets */
790                         hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
791                                     0, NULL);
792                 }
793 
794                 hci_set_le_support(req);
795         }
796 
797         /* Read features beyond page 1 if available */
798         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
799                 struct hci_cp_read_local_ext_features cp;
800 
801                 cp.page = p;
802                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
803                             sizeof(cp), &cp);
804         }
805 
806         return 0;
807 }
808 
/* Fourth and last stage of the init request sequence: queue optional
 * commands whose availability is advertised in the controller's
 * supported-commands bitmask (hdev->commands[]) or in its LMP/LE
 * feature bits. @opt is unused. Always returns 0; failures surface
 * when the assembled request is actually executed.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wildcard address plus delete_all wipes every stored key */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value
	 */
	if (hdev->commands[18] & 0x08 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		/* Only queue the command when the cached controller
		 * setting differs from the desired one.
		 */
		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		/* tx_phys/rx_phys carry the host's configured default
		 * PHY preferences from hdev.
		 */
		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
908 
/* Run the full synchronous init sequence for a configured controller:
 * stages 1 and 2 for every device type, stages 3 and 4 only for
 * HCI_PRIMARY devices, then create debugfs entries when the device is
 * in the setup or config phase. Returns 0 on success or the first
 * negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
965 
/* Minimal init request used for unconfigured controllers: optional
 * reset, read the local version, and read the public BD address when
 * the driver can actually program a replacement (set_bdaddr callback
 * present). @opt is only used for debug logging. Always returns 0.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver opted out via the reset-on-close quirk */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
985 
986 static int __hci_unconf_init(struct hci_dev *hdev)
987 {
988         int err;
989 
990         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
991                 return 0;
992 
993         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
994         if (err < 0)
995                 return err;
996 
997         if (hci_dev_test_flag(hdev, HCI_SETUP))
998                 hci_debugfs_create_basic(hdev);
999 
1000         return 0;
1001 }
1002 
1003 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1004 {
1005         __u8 scan = opt;
1006 
1007         BT_DBG("%s %x", req->hdev->name, scan);
1008 
1009         /* Inquiry and Page scans */
1010         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1011         return 0;
1012 }
1013 
1014 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1015 {
1016         __u8 auth = opt;
1017 
1018         BT_DBG("%s %x", req->hdev->name, auth);
1019 
1020         /* Authentication */
1021         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1022         return 0;
1023 }
1024 
1025 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1026 {
1027         __u8 encrypt = opt;
1028 
1029         BT_DBG("%s %x", req->hdev->name, encrypt);
1030 
1031         /* Encryption */
1032         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1033         return 0;
1034 }
1035 
1036 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1037 {
1038         __le16 policy = cpu_to_le16(opt);
1039 
1040         BT_DBG("%s %x", req->hdev->name, policy);
1041 
1042         /* Default link policy */
1043         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1044         return 0;
1045 }
1046 
1047 /* Get HCI device by index.
1048  * Device is held on return. */
1049 struct hci_dev *hci_dev_get(int index)
1050 {
1051         struct hci_dev *hdev = NULL, *d;
1052 
1053         BT_DBG("%d", index);
1054 
1055         if (index < 0)
1056                 return NULL;
1057 
1058         read_lock(&hci_dev_list_lock);
1059         list_for_each_entry(d, &hci_dev_list, list) {
1060                 if (d->id == index) {
1061                         hdev = hci_dev_hold(d);
1062                         break;
1063                 }
1064         }
1065         read_unlock(&hci_dev_list_lock);
1066         return hdev;
1067 }
1068 
1069 /* ---- Inquiry support ---- */
1070 
1071 bool hci_discovery_active(struct hci_dev *hdev)
1072 {
1073         struct discovery_state *discov = &hdev->discovery;
1074 
1075         switch (discov->state) {
1076         case DISCOVERY_FINDING:
1077         case DISCOVERY_RESOLVING:
1078                 return true;
1079 
1080         default:
1081                 return false;
1082         }
1083 }
1084 
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events toward user space. A transition to the same
 * state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Re-evaluate background scanning now that discovery no
		 * longer uses the controller.
		 */
		hci_update_background_scan(hdev);

		/* STARTING -> STOPPED means discovery never actually
		 * began, so user space was never told it started and
		 * gets no "stopped" event either.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1114 
1115 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1116 {
1117         struct discovery_state *cache = &hdev->discovery;
1118         struct inquiry_entry *p, *n;
1119 
1120         list_for_each_entry_safe(p, n, &cache->all, all) {
1121                 list_del(&p->all);
1122                 kfree(p);
1123         }
1124 
1125         INIT_LIST_HEAD(&cache->unknown);
1126         INIT_LIST_HEAD(&cache->resolve);
1127 }
1128 
1129 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1130                                                bdaddr_t *bdaddr)
1131 {
1132         struct discovery_state *cache = &hdev->discovery;
1133         struct inquiry_entry *e;
1134 
1135         BT_DBG("cache %p, %pMR", cache, bdaddr);
1136 
1137         list_for_each_entry(e, &cache->all, all) {
1138                 if (!bacmp(&e->data.bdaddr, bdaddr))
1139                         return e;
1140         }
1141 
1142         return NULL;
1143 }
1144 
1145 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1146                                                        bdaddr_t *bdaddr)
1147 {
1148         struct discovery_state *cache = &hdev->discovery;
1149         struct inquiry_entry *e;
1150 
1151         BT_DBG("cache %p, %pMR", cache, bdaddr);
1152 
1153         list_for_each_entry(e, &cache->unknown, list) {
1154                 if (!bacmp(&e->data.bdaddr, bdaddr))
1155                         return e;
1156         }
1157 
1158         return NULL;
1159 }
1160 
1161 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1162                                                        bdaddr_t *bdaddr,
1163                                                        int state)
1164 {
1165         struct discovery_state *cache = &hdev->discovery;
1166         struct inquiry_entry *e;
1167 
1168         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1169 
1170         list_for_each_entry(e, &cache->resolve, list) {
1171                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1172                         return e;
1173                 if (!bacmp(&e->data.bdaddr, bdaddr))
1174                         return e;
1175         }
1176 
1177         return NULL;
1178 }
1179 
/* Re-insert @ie at its sorted position in the resolve list. The list
 * is kept ordered by ascending |RSSI|; the insertion point is just
 * before the first entry that is not NAME_PENDING and has an |RSSI|
 * greater than or equal to @ie's, so entries with a name request in
 * flight never terminate the scan.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink from the current position before the ordered re-insert */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* pos is the node (or list head) @ie is inserted after */
	list_add(&ie->list, pos);
}
1198 
/* Add or refresh the inquiry cache entry for the device described by
 * @data; @name_known indicates whether the remote name is already
 * known. Returns MGMT_DEV_FOUND_* flags for the device-found event:
 * CONFIRM_NAME when the name still needs resolving (or an entry could
 * not be allocated) and LEGACY_PAIRING when no SSP support was seen.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed for an entry awaiting name resolution:
		 * re-sort it within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Out of memory: still ask user space to confirm the name */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known for an existing entry that is neither
	 * known nor pending: drop it from the unknown/resolve list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1260 
1261 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1262 {
1263         struct discovery_state *cache = &hdev->discovery;
1264         struct inquiry_info *info = (struct inquiry_info *) buf;
1265         struct inquiry_entry *e;
1266         int copied = 0;
1267 
1268         list_for_each_entry(e, &cache->all, all) {
1269                 struct inquiry_data *data = &e->data;
1270 
1271                 if (copied >= num)
1272                         break;
1273 
1274                 bacpy(&info->bdaddr, &data->bdaddr);
1275                 info->pscan_rep_mode    = data->pscan_rep_mode;
1276                 info->pscan_period_mode = data->pscan_period_mode;
1277                 info->pscan_mode        = data->pscan_mode;
1278                 memcpy(info->dev_class, data->dev_class, 3);
1279                 info->clock_offset      = data->clock_offset;
1280 
1281                 info++;
1282                 copied++;
1283         }
1284 
1285         BT_DBG("cache %p, copied %d", cache, copied);
1286         return copied;
1287 }
1288 
/* Queue an Inquiry command built from the user-supplied
 * hci_inquiry_req passed via @opt. Does nothing when an inquiry is
 * already in progress. Always returns 0.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
1308 
1309 int hci_inquiry(void __user *arg)
1310 {
1311         __u8 __user *ptr = arg;
1312         struct hci_inquiry_req ir;
1313         struct hci_dev *hdev;
1314         int err = 0, do_inquiry = 0, max_rsp;
1315         long timeo;
1316         __u8 *buf;
1317 
1318         if (copy_from_user(&ir, ptr, sizeof(ir)))
1319                 return -EFAULT;
1320 
1321         hdev = hci_dev_get(ir.dev_id);
1322         if (!hdev)
1323                 return -ENODEV;
1324 
1325         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1326                 err = -EBUSY;
1327                 goto done;
1328         }
1329 
1330         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1331                 err = -EOPNOTSUPP;
1332                 goto done;
1333         }
1334 
1335         if (hdev->dev_type != HCI_PRIMARY) {
1336                 err = -EOPNOTSUPP;
1337                 goto done;
1338         }
1339 
1340         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1341                 err = -EOPNOTSUPP;
1342                 goto done;
1343         }
1344 
1345         hci_dev_lock(hdev);
1346         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1347             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1348                 hci_inquiry_cache_flush(hdev);
1349                 do_inquiry = 1;
1350         }
1351         hci_dev_unlock(hdev);
1352 
1353         timeo = ir.length * msecs_to_jiffies(2000);
1354 
1355         if (do_inquiry) {
1356                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1357                                    timeo, NULL);
1358                 if (err < 0)
1359                         goto done;
1360 
1361                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1362                  * cleared). If it is interrupted by a signal, return -EINTR.
1363                  */
1364                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1365                                 TASK_INTERRUPTIBLE)) {
1366                         err = -EINTR;
1367                         goto done;
1368                 }
1369         }
1370 
1371         /* for unlimited number of responses we will use buffer with
1372          * 255 entries
1373          */
1374         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1375 
1376         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1377          * copy it to the user space.
1378          */
1379         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1380         if (!buf) {
1381                 err = -ENOMEM;
1382                 goto done;
1383         }
1384 
1385         hci_dev_lock(hdev);
1386         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1387         hci_dev_unlock(hdev);
1388 
1389         BT_DBG("num_rsp %d", ir.num_rsp);
1390 
1391         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1392                 ptr += sizeof(ir);
1393                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1394                                  ir.num_rsp))
1395                         err = -EFAULT;
1396         } else
1397                 err = -EFAULT;
1398 
1399         kfree(buf);
1400 
1401 done:
1402         hci_dev_put(hdev);
1403         return err;
1404 }
1405 
1406 /**
1407  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1408  *                                     (BD_ADDR) for a HCI device from
1409  *                                     a firmware node property.
1410  * @hdev:       The HCI device
1411  *
1412  * Search the firmware node for 'local-bd-address'.
1413  *
1414  * All-zero BD addresses are rejected, because those could be properties
1415  * that exist in the firmware tables, but were not updated by the firmware. For
1416  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1417  */
1418 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1419 {
1420         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1421         bdaddr_t ba;
1422         int ret;
1423 
1424         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1425                                             (u8 *)&ba, sizeof(ba));
1426         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1427                 return;
1428 
1429         bacpy(&hdev->public_addr, &ba);
1430 }
1431 
/* Bring up an HCI device: open the transport, run the driver setup
 * and unconfigured/config handling as needed, execute the init
 * sequence and announce HCI_DEV_UP. Serialized via the request sync
 * lock. Returns 0 on success or a negative errno; on init failure the
 * transport is closed again and all queued work is flushed.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver-provided transport open callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	/* One credit so the very first HCI command can be sent */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	/* NOTE(review): runs even when init failed (ret != 0) and the
	 * device is torn down below -- confirm this is intended.
	 */
	msft_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() is possible to awake new cmd_work
		 * it should be flushed first to avoid unexpected call of
		 * hci_cmd_work()
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		/* Close the transport and keep only the HCI_RAW flag */
		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1641 
1642 /* ---- HCI ioctl helpers ---- */
1643 
/* Power on an HCI device by index (legacy ioctl and mgmt path).
 *
 * @dev: device index
 *
 * Returns 0 on success or a negative errno. A temporary reference is
 * taken with hci_dev_get() and dropped before returning. Unconfigured
 * devices can only be opened through the user channel.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1698 
1699 /* This function requires the caller holds hdev->lock */
1700 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1701 {
1702         struct hci_conn_params *p;
1703 
1704         list_for_each_entry(p, &hdev->le_conn_params, list) {
1705                 if (p->conn) {
1706                         hci_conn_drop(p->conn);
1707                         hci_conn_put(p->conn);
1708                         p->conn = NULL;
1709                 }
1710                 list_del_init(&p->action);
1711         }
1712 
1713         BT_DBG("All LE pending actions cleared");
1714 }
1715 
/* Power down an HCI device: cancel pending work, flush RX/TX/command
 * paths, tear down connections, optionally send HCI Reset, and invoke
 * the driver's close callback.
 *
 * Returns 0. If the device was not up, only the command timer is
 * cancelled and the function returns early.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* Not up: nothing else to tear down */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only signal power-off to mgmt for a user-initiated close of a
	 * primary, mgmt-managed controller.
	 */
	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* NOTE(review): HCI_UP was already cleared by the
	 * test_and_clear_bit() at the top of this function, so this
	 * condition can never be true here and the vendor shutdown
	 * routine is unreachable from this path — verify intended
	 * behavior against later upstream restructurings.
	 */
	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* Wake up anyone waiting for the suspend power-down task */
	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1846 
1847 int hci_dev_close(__u16 dev)
1848 {
1849         struct hci_dev *hdev;
1850         int err;
1851 
1852         hdev = hci_dev_get(dev);
1853         if (!hdev)
1854                 return -ENODEV;
1855 
1856         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1857                 err = -EBUSY;
1858                 goto done;
1859         }
1860 
1861         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1862                 cancel_delayed_work(&hdev->power_off);
1863 
1864         err = hci_dev_do_close(hdev);
1865 
1866 done:
1867         hci_dev_put(hdev);
1868         return err;
1869 }
1870 
/* Issue an HCI Reset on an opened device after dropping all queued
 * traffic and cached state. Returns the result of the synchronous
 * reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control bookkeeping; the controller forgets its
	 * outstanding credits across a reset.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1904 
1905 int hci_dev_reset(__u16 dev)
1906 {
1907         struct hci_dev *hdev;
1908         int err;
1909 
1910         hdev = hci_dev_get(dev);
1911         if (!hdev)
1912                 return -ENODEV;
1913 
1914         if (!test_bit(HCI_UP, &hdev->flags)) {
1915                 err = -ENETDOWN;
1916                 goto done;
1917         }
1918 
1919         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1920                 err = -EBUSY;
1921                 goto done;
1922         }
1923 
1924         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1925                 err = -EOPNOTSUPP;
1926                 goto done;
1927         }
1928 
1929         err = hci_dev_do_reset(hdev);
1930 
1931 done:
1932         hci_dev_put(hdev);
1933         return err;
1934 }
1935 
1936 int hci_dev_reset_stat(__u16 dev)
1937 {
1938         struct hci_dev *hdev;
1939         int ret = 0;
1940 
1941         hdev = hci_dev_get(dev);
1942         if (!hdev)
1943                 return -ENODEV;
1944 
1945         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1946                 ret = -EBUSY;
1947                 goto done;
1948         }
1949 
1950         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1951                 ret = -EOPNOTSUPP;
1952                 goto done;
1953         }
1954 
1955         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1956 
1957 done:
1958         hci_dev_put(hdev);
1959         return ret;
1960 }
1961 
1962 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1963 {
1964         bool conn_changed, discov_changed;
1965 
1966         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1967 
1968         if ((scan & SCAN_PAGE))
1969                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1970                                                           HCI_CONNECTABLE);
1971         else
1972                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1973                                                            HCI_CONNECTABLE);
1974 
1975         if ((scan & SCAN_INQUIRY)) {
1976                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1977                                                             HCI_DISCOVERABLE);
1978         } else {
1979                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1980                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1981                                                              HCI_DISCOVERABLE);
1982         }
1983 
1984         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1985                 return;
1986 
1987         if (conn_changed || discov_changed) {
1988                 /* In case this was disabled through mgmt */
1989                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1990 
1991                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1992                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1993 
1994                 mgmt_new_settings(hdev);
1995         }
1996 }
1997 
/* Handle legacy per-device HCI ioctls (HCISETAUTH, HCISETSCAN, ...).
 *
 * @cmd: ioctl number
 * @arg: user pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno. All of these commands are
 * rejected on user-channel, unconfigured, non-primary and BR/EDR
 * disabled controllers.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		/* Pure bookkeeping; no command is sent to the controller */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit halves: MTU and packet count.
		 * NOTE(review): which half is at offset 0 depends on
		 * host endianness — confirm against userspace callers.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2103 
2104 int hci_get_dev_list(void __user *arg)
2105 {
2106         struct hci_dev *hdev;
2107         struct hci_dev_list_req *dl;
2108         struct hci_dev_req *dr;
2109         int n = 0, size, err;
2110         __u16 dev_num;
2111 
2112         if (get_user(dev_num, (__u16 __user *) arg))
2113                 return -EFAULT;
2114 
2115         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2116                 return -EINVAL;
2117 
2118         size = sizeof(*dl) + dev_num * sizeof(*dr);
2119 
2120         dl = kzalloc(size, GFP_KERNEL);
2121         if (!dl)
2122                 return -ENOMEM;
2123 
2124         dr = dl->dev_req;
2125 
2126         read_lock(&hci_dev_list_lock);
2127         list_for_each_entry(hdev, &hci_dev_list, list) {
2128                 unsigned long flags = hdev->flags;
2129 
2130                 /* When the auto-off is configured it means the transport
2131                  * is running, but in that case still indicate that the
2132                  * device is actually down.
2133                  */
2134                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2135                         flags &= ~BIT(HCI_UP);
2136 
2137                 (dr + n)->dev_id  = hdev->id;
2138                 (dr + n)->dev_opt = flags;
2139 
2140                 if (++n >= dev_num)
2141                         break;
2142         }
2143         read_unlock(&hci_dev_list_lock);
2144 
2145         dl->dev_num = n;
2146         size = sizeof(*dl) + n * sizeof(*dr);
2147 
2148         err = copy_to_user(arg, dl, size);
2149         kfree(dl);
2150 
2151         return err ? -EFAULT : 0;
2152 }
2153 
/* HCIGETDEVINFO ioctl helper: copy a snapshot of the device state
 * (address, type, flags, MTUs, stats, features) to userspace.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -EFAULT on a
 * failed user copy.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): assumes hdev->name is always NUL-terminated
	 * and fits di.name — strscpy() would make that explicit.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type; next two bits: device type */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report LE buffer sizes in the ACL fields */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2206 
2207 /* ---- Interface to HCI drivers ---- */
2208 
2209 static int hci_rfkill_set_block(void *data, bool blocked)
2210 {
2211         struct hci_dev *hdev = data;
2212 
2213         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2214 
2215         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2216                 return -EBUSY;
2217 
2218         if (blocked) {
2219                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2220                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2221                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2222                         hci_dev_do_close(hdev);
2223         } else {
2224                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2225         }
2226 
2227         return 0;
2228 }
2229 
/* rfkill operations: only the block/unblock callback is provided */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2233 
/* Deferred power-on work. Brings the controller up, re-checks error
 * conditions that were ignored during setup, arms the auto power-off
 * timer when requested and announces the index over mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Already up with auto-off pending: cancel the power-off and
	 * just complete the mgmt power-on sequence.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2305 
/* Deferred power-off work, queued e.g. when the auto-off timeout set
 * up in hci_power_on() expires.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2315 
2316 static void hci_error_reset(struct work_struct *work)
2317 {
2318         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2319 
2320         BT_DBG("%s", hdev->name);
2321 
2322         if (hdev->hw_error)
2323                 hdev->hw_error(hdev, hdev->hw_error_code);
2324         else
2325                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2326 
2327         if (hci_dev_do_close(hdev))
2328                 return;
2329 
2330         hci_dev_do_open(hdev);
2331 }
2332 
2333 void hci_uuids_clear(struct hci_dev *hdev)
2334 {
2335         struct bt_uuid *uuid, *tmp;
2336 
2337         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2338                 list_del(&uuid->list);
2339                 kfree(uuid);
2340         }
2341 }
2342 
2343 void hci_link_keys_clear(struct hci_dev *hdev)
2344 {
2345         struct link_key *key;
2346 
2347         list_for_each_entry(key, &hdev->link_keys, list) {
2348                 list_del_rcu(&key->list);
2349                 kfree_rcu(key, rcu);
2350         }
2351 }
2352 
2353 void hci_smp_ltks_clear(struct hci_dev *hdev)
2354 {
2355         struct smp_ltk *k;
2356 
2357         list_for_each_entry(k, &hdev->long_term_keys, list) {
2358                 list_del_rcu(&k->list);
2359                 kfree_rcu(k, rcu);
2360         }
2361 }
2362 
2363 void hci_smp_irks_clear(struct hci_dev *hdev)
2364 {
2365         struct smp_irk *k;
2366 
2367         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2368                 list_del_rcu(&k->list);
2369                 kfree_rcu(k, rcu);
2370         }
2371 }
2372 
2373 void hci_blocked_keys_clear(struct hci_dev *hdev)
2374 {
2375         struct blocked_key *b;
2376 
2377         list_for_each_entry(b, &hdev->blocked_keys, list) {
2378                 list_del_rcu(&b->list);
2379                 kfree_rcu(b, rcu);
2380         }
2381 }
2382 
2383 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2384 {
2385         bool blocked = false;
2386         struct blocked_key *b;
2387 
2388         rcu_read_lock();
2389         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2390                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2391                         blocked = true;
2392                         break;
2393                 }
2394         }
2395 
2396         rcu_read_unlock();
2397         return blocked;
2398 }
2399 
2400 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2401 {
2402         struct link_key *k;
2403 
2404         rcu_read_lock();
2405         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2406                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2407                         rcu_read_unlock();
2408 
2409                         if (hci_is_blocked_key(hdev,
2410                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2411                                                k->val)) {
2412                                 bt_dev_warn_ratelimited(hdev,
2413                                                         "Link key blocked for %pMR",
2414                                                         &k->bdaddr);
2415                                 return NULL;
2416                         }
2417 
2418                         return k;
2419                 }
2420         }
2421         rcu_read_unlock();
2422 
2423         return NULL;
2424 }
2425 
2426 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2427                                u8 key_type, u8 old_key_type)
2428 {
2429         /* Legacy key */
2430         if (key_type < 0x03)
2431                 return true;
2432 
2433         /* Debug keys are insecure so don't store them persistently */
2434         if (key_type == HCI_LK_DEBUG_COMBINATION)
2435                 return false;
2436 
2437         /* Changed combination key and there's no previous one */
2438         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2439                 return false;
2440 
2441         /* Security mode 3 case */
2442         if (!conn)
2443                 return true;
2444 
2445         /* BR/EDR key derived using SC from an LE link */
2446         if (conn->type == LE_LINK)
2447                 return true;
2448 
2449         /* Neither local nor remote side had no-bonding as requirement */
2450         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2451                 return true;
2452 
2453         /* Local side had dedicated bonding as requirement */
2454         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2455                 return true;
2456 
2457         /* Remote side had dedicated bonding as requirement */
2458         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2459                 return true;
2460 
2461         /* If none of the above criteria match, then don't store the key
2462          * persistently */
2463         return false;
2464 }
2465 
2466 static u8 ltk_role(u8 type)
2467 {
2468         if (type == SMP_LTK)
2469                 return HCI_ROLE_MASTER;
2470 
2471         return HCI_ROLE_SLAVE;
2472 }
2473 
2474 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2475                              u8 addr_type, u8 role)
2476 {
2477         struct smp_ltk *k;
2478 
2479         rcu_read_lock();
2480         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2481                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2482                         continue;
2483 
2484                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2485                         rcu_read_unlock();
2486 
2487                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2488                                                k->val)) {
2489                                 bt_dev_warn_ratelimited(hdev,
2490                                                         "LTK blocked for %pMR",
2491                                                         &k->bdaddr);
2492                                 return NULL;
2493                         }
2494 
2495                         return k;
2496                 }
2497         }
2498         rcu_read_unlock();
2499 
2500         return NULL;
2501 }
2502 
/* Look up the IRK that resolves the Resolvable Private Address @rpa.
 *
 * First pass compares @rpa against each IRK's cached RPA (cheap); the
 * second pass cryptographically resolves @rpa against every stored IRK
 * and caches the RPA on the matching entry for future fast lookups.
 *
 * Returns the matching IRK, or NULL if none resolves @rpa or the
 * matching key is on the blocked-key list.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	/* Fast path: RPA was resolved before and is still cached */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	/* Slow path: try to resolve @rpa with each stored IRK */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	/* Blocked keys are treated as if no IRK were stored */
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2536 
2537 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2538                                      u8 addr_type)
2539 {
2540         struct smp_irk *irk_to_return = NULL;
2541         struct smp_irk *irk;
2542 
2543         /* Identity Address must be public or static random */
2544         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2545                 return NULL;
2546 
2547         rcu_read_lock();
2548         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2549                 if (addr_type == irk->addr_type &&
2550                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2551                         irk_to_return = irk;
2552                         goto done;
2553                 }
2554         }
2555 
2556 done:
2557 
2558         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2559                                                 irk_to_return->val)) {
2560                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2561                                         &irk_to_return->bdaddr);
2562                 irk_to_return = NULL;
2563         }
2564 
2565         rcu_read_unlock();
2566 
2567         return irk_to_return;
2568 }
2569 
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * If a key for the address already exists it is overwritten, otherwise
 * a new entry is allocated and linked into hdev->link_keys. On return,
 * *persistent (if non-NULL) tells the caller whether the key should be
 * stored permanently (see hci_persistent_key).
 *
 * Returns the stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the type of the key it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2616 
2617 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2618                             u8 addr_type, u8 type, u8 authenticated,
2619                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2620 {
2621         struct smp_ltk *key, *old_key;
2622         u8 role = ltk_role(type);
2623 
2624         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2625         if (old_key)
2626                 key = old_key;
2627         else {
2628                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2629                 if (!key)
2630                         return NULL;
2631                 list_add_rcu(&key->list, &hdev->long_term_keys);
2632         }
2633 
2634         bacpy(&key->bdaddr, bdaddr);
2635         key->bdaddr_type = addr_type;
2636         memcpy(key->val, tk, sizeof(key->val));
2637         key->authenticated = authenticated;
2638         key->ediv = ediv;
2639         key->rand = rand;
2640         key->enc_size = enc_size;
2641         key->type = type;
2642 
2643         return key;
2644 }
2645 
2646 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2647                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2648 {
2649         struct smp_irk *irk;
2650 
2651         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2652         if (!irk) {
2653                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2654                 if (!irk)
2655                         return NULL;
2656 
2657                 bacpy(&irk->bdaddr, bdaddr);
2658                 irk->addr_type = addr_type;
2659 
2660                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2661         }
2662 
2663         memcpy(irk->val, val, 16);
2664         bacpy(&irk->rpa, rpa);
2665 
2666         return irk;
2667 }
2668 
2669 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2670 {
2671         struct link_key *key;
2672 
2673         key = hci_find_link_key(hdev, bdaddr);
2674         if (!key)
2675                 return -ENOENT;
2676 
2677         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2678 
2679         list_del_rcu(&key->list);
2680         kfree_rcu(key, rcu);
2681 
2682         return 0;
2683 }
2684 
2685 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2686 {
2687         struct smp_ltk *k;
2688         int removed = 0;
2689 
2690         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2691                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2692                         continue;
2693 
2694                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2695 
2696                 list_del_rcu(&k->list);
2697                 kfree_rcu(k, rcu);
2698                 removed++;
2699         }
2700 
2701         return removed ? 0 : -ENOENT;
2702 }
2703 
2704 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2705 {
2706         struct smp_irk *k;
2707 
2708         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2709                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2710                         continue;
2711 
2712                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2713 
2714                 list_del_rcu(&k->list);
2715                 kfree_rcu(k, rcu);
2716         }
2717 }
2718 
2719 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2720 {
2721         struct smp_ltk *k;
2722         struct smp_irk *irk;
2723         u8 addr_type;
2724 
2725         if (type == BDADDR_BREDR) {
2726                 if (hci_find_link_key(hdev, bdaddr))
2727                         return true;
2728                 return false;
2729         }
2730 
2731         /* Convert to HCI addr type which struct smp_ltk uses */
2732         if (type == BDADDR_LE_PUBLIC)
2733                 addr_type = ADDR_LE_DEV_PUBLIC;
2734         else
2735                 addr_type = ADDR_LE_DEV_RANDOM;
2736 
2737         irk = hci_get_irk(hdev, bdaddr, addr_type);
2738         if (irk) {
2739                 bdaddr = &irk->bdaddr;
2740                 addr_type = irk->addr_type;
2741         }
2742 
2743         rcu_read_lock();
2744         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2745                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2746                         rcu_read_unlock();
2747                         return true;
2748                 }
2749         }
2750         rcu_read_unlock();
2751 
2752         return false;
2753 }
2754 
2755 /* HCI command timer function */
2756 static void hci_cmd_timeout(struct work_struct *work)
2757 {
2758         struct hci_dev *hdev = container_of(work, struct hci_dev,
2759                                             cmd_timer.work);
2760 
2761         if (hdev->sent_cmd) {
2762                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2763                 u16 opcode = __le16_to_cpu(sent->opcode);
2764 
2765                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2766         } else {
2767                 bt_dev_err(hdev, "command tx timeout");
2768         }
2769 
2770         if (hdev->cmd_timeout)
2771                 hdev->cmd_timeout(hdev);
2772 
2773         atomic_set(&hdev->cmd_cnt, 1);
2774         queue_work(hdev->workqueue, &hdev->cmd_work);
2775 }
2776 
2777 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2778                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2779 {
2780         struct oob_data *data;
2781 
2782         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2783                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2784                         continue;
2785                 if (data->bdaddr_type != bdaddr_type)
2786                         continue;
2787                 return data;
2788         }
2789 
2790         return NULL;
2791 }
2792 
2793 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2794                                u8 bdaddr_type)
2795 {
2796         struct oob_data *data;
2797 
2798         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2799         if (!data)
2800                 return -ENOENT;
2801 
2802         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2803 
2804         list_del(&data->list);
2805         kfree(data);
2806 
2807         return 0;
2808 }
2809 
2810 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2811 {
2812         struct oob_data *data, *n;
2813 
2814         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2815                 list_del(&data->list);
2816                 kfree(data);
2817         }
2818 }
2819 
/* Store (or update in place) remote OOB pairing data for @bdaddr.
 *
 * Either key set may be absent (NULL hash or rand): absent sets are
 * zeroed out. data->present encodes what is stored: 0x01 = P-192 only,
 * 0x02 = P-256 only, 0x03 = both, 0x00 = neither.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		/* Both P-192 and P-256 data present */
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* Only P-192 data present */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2865 
2866 /* This function requires the caller holds hdev->lock */
2867 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2868 {
2869         struct adv_info *adv_instance;
2870 
2871         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2872                 if (adv_instance->instance == instance)
2873                         return adv_instance;
2874         }
2875 
2876         return NULL;
2877 }
2878 
2879 /* This function requires the caller holds hdev->lock */
2880 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2881 {
2882         struct adv_info *cur_instance;
2883 
2884         cur_instance = hci_find_adv_instance(hdev, instance);
2885         if (!cur_instance)
2886                 return NULL;
2887 
2888         if (cur_instance == list_last_entry(&hdev->adv_instances,
2889                                             struct adv_info, list))
2890                 return list_first_entry(&hdev->adv_instances,
2891                                                  struct adv_info, list);
2892         else
2893                 return list_next_entry(cur_instance, list);
2894 }
2895 
2896 /* This function requires the caller holds hdev->lock */
2897 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2898 {
2899         struct adv_info *adv_instance;
2900 
2901         adv_instance = hci_find_adv_instance(hdev, instance);
2902         if (!adv_instance)
2903                 return -ENOENT;
2904 
2905         BT_DBG("%s removing %dMR", hdev->name, instance);
2906 
2907         if (hdev->cur_adv_instance == instance) {
2908                 if (hdev->adv_instance_timeout) {
2909                         cancel_delayed_work(&hdev->adv_instance_expire);
2910                         hdev->adv_instance_timeout = 0;
2911                 }
2912                 hdev->cur_adv_instance = 0x00;
2913         }
2914 
2915         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2916 
2917         list_del(&adv_instance->list);
2918         kfree(adv_instance);
2919 
2920         hdev->adv_instance_cnt--;
2921 
2922         return 0;
2923 }
2924 
2925 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2926 {
2927         struct adv_info *adv_instance, *n;
2928 
2929         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2930                 adv_instance->rpa_expired = rpa_expired;
2931 }
2932 
2933 /* This function requires the caller holds hdev->lock */
2934 void hci_adv_instances_clear(struct hci_dev *hdev)
2935 {
2936         struct adv_info *adv_instance, *n;
2937 
2938         if (hdev->adv_instance_timeout) {
2939                 cancel_delayed_work(&hdev->adv_instance_expire);
2940                 hdev->adv_instance_timeout = 0;
2941         }
2942 
2943         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2944                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2945                 list_del(&adv_instance->list);
2946                 kfree(adv_instance);
2947         }
2948 
2949         hdev->adv_instance_cnt = 0;
2950         hdev->cur_adv_instance = 0x00;
2951 }
2952 
2953 static void adv_instance_rpa_expired(struct work_struct *work)
2954 {
2955         struct adv_info *adv_instance = container_of(work, struct adv_info,
2956                                                      rpa_expired_cb.work);
2957 
2958         BT_DBG("");
2959 
2960         adv_instance->rpa_expired = true;
2961 }
2962 
2963 /* This function requires the caller holds hdev->lock */
2964 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2965                          u16 adv_data_len, u8 *adv_data,
2966                          u16 scan_rsp_len, u8 *scan_rsp_data,
2967                          u16 timeout, u16 duration, s8 tx_power,
2968                          u32 min_interval, u32 max_interval)
2969 {
2970         struct adv_info *adv_instance;
2971 
2972         adv_instance = hci_find_adv_instance(hdev, instance);
2973         if (adv_instance) {
2974                 memset(adv_instance->adv_data, 0,
2975                        sizeof(adv_instance->adv_data));
2976                 memset(adv_instance->scan_rsp_data, 0,
2977                        sizeof(adv_instance->scan_rsp_data));
2978         } else {
2979                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2980                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2981                         return -EOVERFLOW;
2982 
2983                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2984                 if (!adv_instance)
2985                         return -ENOMEM;
2986 
2987                 adv_instance->pending = true;
2988                 adv_instance->instance = instance;
2989                 list_add(&adv_instance->list, &hdev->adv_instances);
2990                 hdev->adv_instance_cnt++;
2991         }
2992 
2993         adv_instance->flags = flags;
2994         adv_instance->adv_data_len = adv_data_len;
2995         adv_instance->scan_rsp_len = scan_rsp_len;
2996         adv_instance->min_interval = min_interval;
2997         adv_instance->max_interval = max_interval;
2998         adv_instance->tx_power = tx_power;
2999 
3000         if (adv_data_len)
3001                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3002 
3003         if (scan_rsp_len)
3004                 memcpy(adv_instance->scan_rsp_data,
3005                        scan_rsp_data, scan_rsp_len);
3006 
3007         adv_instance->timeout = timeout;
3008         adv_instance->remaining_time = timeout;
3009 
3010         if (duration == 0)
3011                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3012         else
3013                 adv_instance->duration = duration;
3014 
3015         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3016                           adv_instance_rpa_expired);
3017 
3018         BT_DBG("%s for %dMR", hdev->name, instance);
3019 
3020         return 0;
3021 }
3022 
3023 /* This function requires the caller holds hdev->lock */
3024 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3025                               u16 adv_data_len, u8 *adv_data,
3026                               u16 scan_rsp_len, u8 *scan_rsp_data)
3027 {
3028         struct adv_info *adv_instance;
3029 
3030         adv_instance = hci_find_adv_instance(hdev, instance);
3031 
3032         /* If advertisement doesn't exist, we can't modify its data */
3033         if (!adv_instance)
3034                 return -ENOENT;
3035 
3036         if (adv_data_len) {
3037                 memset(adv_instance->adv_data, 0,
3038                        sizeof(adv_instance->adv_data));
3039                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3040                 adv_instance->adv_data_len = adv_data_len;
3041         }
3042 
3043         if (scan_rsp_len) {
3044                 memset(adv_instance->scan_rsp_data, 0,
3045                        sizeof(adv_instance->scan_rsp_data));
3046                 memcpy(adv_instance->scan_rsp_data,
3047                        scan_rsp_data, scan_rsp_len);
3048                 adv_instance->scan_rsp_len = scan_rsp_len;
3049         }
3050 
3051         return 0;
3052 }
3053 
3054 /* This function requires the caller holds hdev->lock */
3055 void hci_adv_monitors_clear(struct hci_dev *hdev)
3056 {
3057         struct adv_monitor *monitor;
3058         int handle;
3059 
3060         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3061                 hci_free_adv_monitor(hdev, monitor);
3062 
3063         idr_destroy(&hdev->adv_monitors_idr);
3064 }
3065 
3066 /* Frees the monitor structure and do some bookkeepings.
3067  * This function requires the caller holds hdev->lock.
3068  */
3069 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3070 {
3071         struct adv_pattern *pattern;
3072         struct adv_pattern *tmp;
3073 
3074         if (!monitor)
3075                 return;
3076 
3077         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3078                 list_del(&pattern->list);
3079                 kfree(pattern);
3080         }
3081 
3082         if (monitor->handle)
3083                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3084 
3085         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3086                 hdev->adv_monitors_cnt--;
3087                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3088         }
3089 
3090         kfree(monitor);
3091 }
3092 
/* Forward an add-monitor completion status to the management interface. */
int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_add_adv_patterns_monitor_complete(hdev, status);
}
3097 
/* Forward a remove-monitor completion status to the management interface. */
int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_remove_adv_monitor_complete(hdev, status);
}
3102 
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * Returns true if request is forwarded (result is pending), false otherwise;
 * on a false return *err distinguishes success (0) from failure (negative).
 * This function requires the caller holds hdev->lock.
 */
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
			 int *err)
{
	int min, max, handle;

	*err = 0;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	/* Allocate a handle for the monitor from the adv_monitors IDR */
	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);
	if (handle < 0) {
		*err = handle;
		return false;
	}

	monitor->handle = handle;

	/* Powered off: keep the monitor registered locally only */
	if (!hdev_is_powered(hdev))
		return false;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		/* No controller offload: fall back to host-side scanning */
		hci_update_background_scan(hdev);
		bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
		/* Message was not forwarded to controller - not an error */
		return false;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
			   *err);
		break;
	}

	return (*err == 0);
}
3149 
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * This function requires the caller holds hdev->lock.
 */
static bool hci_remove_adv_monitor(struct hci_dev *hdev,
				   struct adv_monitor *monitor,
				   u16 handle, int *err)
{
	*err = 0;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		/* Nothing was offloaded, so only local state must go away */
		goto free_monitor;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_remove_monitor(hdev, monitor, handle);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (*err == -ENOENT)
		goto free_monitor;

	return (*err == 0);

free_monitor:
	if (*err == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	/* The missing-handle case is deliberately not reported as an error */
	*err = 0;
	return false;
}
3184 
3185 /* Returns true if request is forwarded (result is pending), false otherwise.
3186  * This function requires the caller holds hdev->lock.
3187  */
3188 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3189 {
3190         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3191         bool pending;
3192 
3193         if (!monitor) {
3194                 *err = -EINVAL;
3195                 return false;
3196         }
3197 
3198         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3199         if (!*err && !pending)
3200                 hci_update_background_scan(hdev);
3201 
3202         bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3203                    hdev->name, handle, *err, pending ? "" : "not ");
3204 
3205         return pending;
3206 }
3207 
/* Returns true if request is forwarded (result is pending), false otherwise.
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	bool pending = false;
	bool update = false;

	*err = 0;

	/* Walk the IDR, removing monitors one by one; stop on the first
	 * error or as soon as a removal was forwarded to the controller
	 * (its completion is then pending).
	 */
	while (!*err && !pending) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		pending = hci_remove_adv_monitor(hdev, monitor, 0, err);

		if (!*err && !pending)
			update = true;
	}

	/* At least one monitor was removed locally: refresh scanning */
	if (update)
		hci_update_background_scan(hdev);

	bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
		   hdev->name, *err, pending ? "" : "not ");

	return pending;
}
3239 
/* Returns true if any advertising monitor is currently registered.
 * This function requires the caller holds hdev->lock.
 */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}
3245 
3246 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3247 {
3248         if (msft_monitor_supported(hdev))
3249                 return HCI_ADV_MONITOR_EXT_MSFT;
3250 
3251         return HCI_ADV_MONITOR_EXT_NONE;
3252 }
3253 
3254 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3255                                          bdaddr_t *bdaddr, u8 type)
3256 {
3257         struct bdaddr_list *b;
3258 
3259         list_for_each_entry(b, bdaddr_list, list) {
3260                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3261                         return b;
3262         }
3263 
3264         return NULL;
3265 }
3266 
3267 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3268                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3269                                 u8 type)
3270 {
3271         struct bdaddr_list_with_irk *b;
3272 
3273         list_for_each_entry(b, bdaddr_list, list) {
3274                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3275                         return b;
3276         }
3277 
3278         return NULL;
3279 }
3280 
3281 struct bdaddr_list_with_flags *
3282 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3283                                   bdaddr_t *bdaddr, u8 type)
3284 {
3285         struct bdaddr_list_with_flags *b;
3286 
3287         list_for_each_entry(b, bdaddr_list, list) {
3288                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3289                         return b;
3290         }
3291 
3292         return NULL;
3293 }
3294 
3295 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3296 {
3297         struct bdaddr_list *b, *n;
3298 
3299         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3300                 list_del(&b->list);
3301                 kfree(b);
3302         }
3303 }
3304 
3305 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3306 {
3307         struct bdaddr_list *entry;
3308 
3309         if (!bacmp(bdaddr, BDADDR_ANY))
3310                 return -EBADF;
3311 
3312         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3313                 return -EEXIST;
3314 
3315         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3316         if (!entry)
3317                 return -ENOMEM;
3318 
3319         bacpy(&entry->bdaddr, bdaddr);
3320         entry->bdaddr_type = type;
3321 
3322         list_add(&entry->list, list);
3323 
3324         return 0;
3325 }
3326 
3327 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3328                                         u8 type, u8 *peer_irk, u8 *local_irk)
3329 {
3330         struct bdaddr_list_with_irk *entry;
3331 
3332         if (!bacmp(bdaddr, BDADDR_ANY))
3333                 return -EBADF;
3334 
3335         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3336                 return -EEXIST;
3337 
3338         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3339         if (!entry)
3340                 return -ENOMEM;
3341 
3342         bacpy(&entry->bdaddr, bdaddr);
3343         entry->bdaddr_type = type;
3344 
3345         if (peer_irk)
3346                 memcpy(entry->peer_irk, peer_irk, 16);
3347 
3348         if (local_irk)
3349                 memcpy(entry->local_irk, local_irk, 16);
3350 
3351         list_add(&entry->list, list);
3352 
3353         return 0;
3354 }
3355 
3356 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3357                                    u8 type, u32 flags)
3358 {
3359         struct bdaddr_list_with_flags *entry;
3360 
3361         if (!bacmp(bdaddr, BDADDR_ANY))
3362                 return -EBADF;
3363 
3364         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3365                 return -EEXIST;
3366 
3367         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3368         if (!entry)
3369                 return -ENOMEM;
3370 
3371         bacpy(&entry->bdaddr, bdaddr);
3372         entry->bdaddr_type = type;
3373         entry->current_flags = flags;
3374 
3375         list_add(&entry->list, list);
3376 
3377         return 0;
3378 }
3379 
3380 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3381 {
3382         struct bdaddr_list *entry;
3383 
3384         if (!bacmp(bdaddr, BDADDR_ANY)) {
3385                 hci_bdaddr_list_clear(list);
3386                 return 0;
3387         }
3388 
3389         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3390         if (!entry)
3391                 return -ENOENT;
3392 
3393         list_del(&entry->list);
3394         kfree(entry);
3395 
3396         return 0;
3397 }
3398 
3399 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3400                                                         u8 type)
3401 {
3402         struct bdaddr_list_with_irk *entry;
3403 
3404         if (!bacmp(bdaddr, BDADDR_ANY)) {
3405                 hci_bdaddr_list_clear(list);
3406                 return 0;
3407         }
3408 
3409         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3410         if (!entry)
3411                 return -ENOENT;
3412 
3413         list_del(&entry->list);
3414         kfree(entry);
3415 
3416         return 0;
3417 }
3418 
3419 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3420                                    u8 type)
3421 {
3422         struct bdaddr_list_with_flags *entry;
3423 
3424         if (!bacmp(bdaddr, BDADDR_ANY)) {
3425                 hci_bdaddr_list_clear(list);
3426                 return 0;
3427         }
3428 
3429         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3430         if (!entry)
3431                 return -ENOENT;
3432 
3433         list_del(&entry->list);
3434         kfree(entry);
3435 
3436         return 0;
3437 }
3438 
3439 /* This function requires the caller holds hdev->lock */
3440 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3441                                                bdaddr_t *addr, u8 addr_type)
3442 {
3443         struct hci_conn_params *params;
3444 
3445         list_for_each_entry(params, &hdev->le_conn_params, list) {
3446                 if (bacmp(&params->addr, addr) == 0 &&
3447                     params->addr_type == addr_type) {
3448                         return params;
3449                 }
3450         }
3451 
3452         return NULL;
3453 }
3454 
3455 /* This function requires the caller holds hdev->lock */
3456 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3457                                                   bdaddr_t *addr, u8 addr_type)
3458 {
3459         struct hci_conn_params *param;
3460 
3461         switch (addr_type) {
3462         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3463                 addr_type = ADDR_LE_DEV_PUBLIC;
3464                 break;
3465         case ADDR_LE_DEV_RANDOM_RESOLVED:
3466                 addr_type = ADDR_LE_DEV_RANDOM;
3467                 break;
3468         }
3469 
3470         list_for_each_entry(param, list, action) {
3471                 if (bacmp(&param->addr, addr) == 0 &&
3472                     param->addr_type == addr_type)
3473                         return param;
3474         }
3475 
3476         return NULL;
3477 }
3478 
3479 /* This function requires the caller holds hdev->lock */
3480 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3481                                             bdaddr_t *addr, u8 addr_type)
3482 {
3483         struct hci_conn_params *params;
3484 
3485         params = hci_conn_params_lookup(hdev, addr, addr_type);
3486         if (params)
3487                 return params;
3488 
3489         params = kzalloc(sizeof(*params), GFP_KERNEL);
3490         if (!params) {
3491                 bt_dev_err(hdev, "out of memory");
3492                 return NULL;
3493         }
3494 
3495         bacpy(&params->addr, addr);
3496         params->addr_type = addr_type;
3497 
3498         list_add(&params->list, &hdev->le_conn_params);
3499         INIT_LIST_HEAD(&params->action);
3500 
3501         params->conn_min_interval = hdev->le_conn_min_interval;
3502         params->conn_max_interval = hdev->le_conn_max_interval;
3503         params->conn_latency = hdev->le_conn_latency;
3504         params->supervision_timeout = hdev->le_supv_timeout;
3505         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3506 
3507         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3508 
3509         return params;
3510 }
3511 
3512 static void hci_conn_params_free(struct hci_conn_params *params)
3513 {
3514         if (params->conn) {
3515                 hci_conn_drop(params->conn);
3516                 hci_conn_put(params->conn);
3517         }
3518 
3519         list_del(&params->action);
3520         list_del(&params->list);
3521         kfree(params);
3522 }
3523 
3524 /* This function requires the caller holds hdev->lock */
3525 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3526 {
3527         struct hci_conn_params *params;
3528 
3529         params = hci_conn_params_lookup(hdev, addr, addr_type);
3530         if (!params)
3531                 return;
3532 
3533         hci_conn_params_free(params);
3534 
3535         hci_update_background_scan(hdev);
3536 
3537         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3538 }
3539 
3540 /* This function requires the caller holds hdev->lock */
3541 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3542 {
3543         struct hci_conn_params *params, *tmp;
3544 
3545         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3546                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3547                         continue;
3548 
3549                 /* If trying to estabilish one time connection to disabled
3550                  * device, leave the params, but mark them as just once.
3551                  */
3552                 if (params->explicit_connect) {
3553                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3554                         continue;
3555                 }
3556 
3557                 list_del(&params->list);
3558                 kfree(params);
3559         }
3560 
3561         BT_DBG("All LE disabled connection parameters were removed");
3562 }
3563 
3564 /* This function requires the caller holds hdev->lock */
3565 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3566 {
3567         struct hci_conn_params *params, *tmp;
3568 
3569         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3570                 hci_conn_params_free(params);
3571 
3572         BT_DBG("All LE connection parameters were removed");
3573 }
3574 
3575 /* Copy the Identity Address of the controller.
3576  *
3577  * If the controller has a public BD_ADDR, then by default use that one.
3578  * If this is a LE only controller without a public address, default to
3579  * the static random address.
3580  *
3581  * For debugging purposes it is possible to force controllers with a
3582  * public address to use the static random address instead.
3583  *
3584  * In case BR/EDR has been disabled on a dual-mode controller and
3585  * userspace has configured a static address, then that address
3586  * becomes the identity address instead of the public BR/EDR address.
3587  */
3588 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3589                                u8 *bdaddr_type)
3590 {
3591         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3592             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3593             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3594              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3595                 bacpy(bdaddr, &hdev->static_addr);
3596                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3597         } else {
3598                 bacpy(bdaddr, &hdev->bdaddr);
3599                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3600         }
3601 }
3602 
3603 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3604 {
3605         int i;
3606 
3607         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3608                 clear_bit(i, hdev->suspend_tasks);
3609 
3610         wake_up(&hdev->suspend_wait_q);
3611 }
3612 
3613 static int hci_suspend_wait_event(struct hci_dev *hdev)
3614 {
3615 #define WAKE_COND                                                              \
3616         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3617          __SUSPEND_NUM_TASKS)
3618 
3619         int i;
3620         int ret = wait_event_timeout(hdev->suspend_wait_q,
3621                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3622 
3623         if (ret == 0) {
3624                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3625                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3626                         if (test_bit(i, hdev->suspend_tasks))
3627                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3628                         clear_bit(i, hdev->suspend_tasks);
3629                 }
3630 
3631                 ret = -ETIMEDOUT;
3632         } else {
3633                 ret = 0;
3634         }
3635 
3636         return ret;
3637 }
3638 
/* Worker for hdev->suspend_prepare: under hdev->lock, drive the HCI
 * request machinery towards the state stored in suspend_state_next.
 */
static void hci_prepare_suspend(struct work_struct *work)
{
	struct hci_dev *hdev =
		container_of(work, struct hci_dev, suspend_prepare);

	hci_dev_lock(hdev);
	hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
	hci_dev_unlock(hdev);
}
3648 
/* Request a transition to suspend state @next and wait synchronously
 * for it to complete.  The actual work runs from hci_prepare_suspend()
 * on the req_workqueue.  Returns 0 on success or the error from
 * hci_suspend_wait_event() (e.g. -ETIMEDOUT).
 */
static int hci_change_suspend_state(struct hci_dev *hdev,
				    enum suspended_state next)
{
	hdev->suspend_state_next = next;
	set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
	return hci_suspend_wait_event(hdev);
}
3657 
/* Reset the recorded wake reason/address under hdev->lock, so stale
 * data from a previous suspend cycle is not reported on resume.
 */
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}
3668 
/* PM notifier callback: drives the controller through the suspend
 * (PM_SUSPEND_PREPARE) and resume (PM_POST_SUSPEND) sequences.
 * Always returns NOTIFY_DONE so system suspend proceeds even when
 * preparation failed; recovery is attempted on resume.
 */
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;
	u8 state = BT_RUNNING;

	/* If powering down, wait for completion. */
	if (mgmt_powering_down(hdev)) {
		set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
		ret = hci_suspend_wait_event(hdev);
		if (ret)
			goto done;
	}

	/* Suspend notifier should only act on events when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		goto done;

	if (action == PM_SUSPEND_PREPARE) {
		/* Suspend consists of two actions:
		 *  - First, disconnect everything and make the controller not
		 *    connectable (disabling scanning)
		 *  - Second, program event filter/whitelist and enable scan
		 */
		ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
		if (!ret)
			state = BT_SUSPEND_DISCONNECT;

		/* Only configure whitelist if disconnect succeeded and wake
		 * isn't being prevented.
		 */
		if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
			ret = hci_change_suspend_state(hdev,
						BT_SUSPEND_CONFIGURE_WAKE);
			if (!ret)
				state = BT_SUSPEND_CONFIGURE_WAKE;
		}

		hci_clear_wake_reason(hdev);
		/* Report the furthest suspend state actually reached */
		mgmt_suspending(hdev, state);

	} else if (action == PM_POST_SUSPEND) {
		ret = hci_change_suspend_state(hdev, BT_RUNNING);

		/* NOTE(review): wake_reason/wake_addr are read here without
		 * taking hdev->lock, unlike hci_clear_wake_reason() which
		 * writes them under the lock — confirm this is race-free.
		 */
		mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
			      hdev->wake_addr_type);
	}

done:
	/* We always allow suspend even if suspend preparation failed and
	 * attempt to recover in resume.
	 */
	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	return NOTIFY_DONE;
}
3730 
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and initializes every default
 * parameter, list head, work item, queue and waitqueue the core needs.
 * Returns the new device or NULL on allocation failure; callers free
 * it via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* BR/EDR packet-type and capability defaults */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	/* Advertisement monitor interleave-scan defaults */
	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* All per-device lists start empty */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);

	/* Deferred work used throughout the core */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);
	INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_waitqueue_head(&hdev->suspend_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3848 
/* Free HCI device.
 *
 * Only drops the embedded device reference; the actual memory is
 * released by the device core's release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3856 
/* Register HCI device.
 *
 * Validates the driver callbacks, assigns an index, creates the
 * workqueues, sysfs device, debugfs dir, rfkill switch and PM notifier,
 * adds the device to the global list and schedules power-on.  Returns
 * the assigned index (>= 0) or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable driver must provide at least these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered workqueues: one for RX/TX/cmd, one for requests */
	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill registration is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		/* NOTE(review): on failure this jumps to err_wqueue, which
		 * only destroys the workqueues and releases the index — it
		 * does not undo device_add(), rfkill registration, the
		 * hci_dev_list insertion, HCI_DEV_REG event or the
		 * hci_dev_hold() above.  Confirm against later upstream
		 * fixes before relying on this error path.
		 */
		error = register_pm_notifier(&hdev->suspend_notifier);
		if (error)
			goto err_wqueue;
	}

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3970 
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): removes the device from the global
 * list, cancels pending work, closes the device, tears down mgmt state,
 * sysfs/debugfs/rfkill, clears all per-device lists under hdev->lock,
 * drops the registration reference and releases the index.  Ordering
 * matters throughout: HCI_UNREGISTER is set first so concurrent paths
 * (e.g. the suspend notifier) stop acting on the device.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* hdev->id is needed after hci_dev_put() may have dropped hdev */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hci_suspend_clear_tasks(hdev);
		unregister_pm_notifier(&hdev->suspend_notifier);
		cancel_work_sync(&hdev->suspend_prepare);
	}

	hci_dev_do_close(hdev);

	/* Only announce removal via mgmt if userspace ever saw the index */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush every per-device list while holding the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4046 
/* Suspend HCI device.
 *
 * Only notifies HCI sockets of the suspend; always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4054 
/* Resume HCI device.
 *
 * Only notifies HCI sockets of the resume; always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4062 
4063 /* Reset HCI device */
4064 int hci_reset_dev(struct hci_dev *hdev)
4065 {
4066         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4067         struct sk_buff *skb;
4068 
4069         skb = bt_skb_alloc(3, GFP_ATOMIC);
4070         if (!skb)
4071                 return -ENOMEM;
4072 
4073         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4074         skb_put_data(skb, hw_err, 3);
4075 
4076         /* Send Hardware Error to upper stack */
4077         return hci_recv_frame(hdev, skb);
4078 }
4079 EXPORT_SYMBOL(hci_reset_dev);
4080 
4081 /* Receive frame from HCI drivers */
4082 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4083 {
4084         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4085                       && !test_bit(HCI_INIT, &hdev->flags))) {
4086                 kfree_skb(skb);
4087                 return -ENXIO;
4088         }
4089 
4090         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4091             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4092             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4093             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4094                 kfree_skb(skb);
4095                 return -EINVAL;
4096         }
4097 
4098         /* Incoming skb */
4099         bt_cb(skb)->incoming = 1;
4100 
4101         /* Time stamp */
4102         __net_timestamp(skb);
4103 
4104         skb_queue_tail(&hdev->rx_q, skb);
4105         queue_work(hdev->workqueue, &hdev->rx_work);
4106 
4107         return 0;
4108 }
4109 EXPORT_SYMBOL(hci_recv_frame);
4110 
/* Receive diagnostic message from HCI drivers.
 *
 * Tags the skb as a diagnostic packet, timestamps it and queues it on
 * the RX queue.  Unlike hci_recv_frame() no device-state or type checks
 * are performed.  Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
4126 
/* Record a printf-formatted hardware-info string for the device,
 * replacing (and freeing) any previously stored one.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
4137 
/* Record a printf-formatted firmware-info string for the device,
 * replacing (and freeing) any previously stored one.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
4148 
4149 /* ---- Interface to upper protocols ---- */
4150 
/* Register an upper-protocol callback structure on the global
 * hci_cb_list (protected by hci_cb_list_lock).  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4162 
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4174 
/* Hand one outbound skb to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor (and promiscuous
 * sockets), then passes it to hdev->send.  Consumes the skb: it is
 * freed here when the device is not running or the driver rejects it,
 * otherwise ownership moves to the driver.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
4207 
/* Send HCI command.
 *
 * Builds a command skb for @opcode with @plen bytes of @param, flags it
 * as a stand-alone request and queues it on the command queue; the
 * actual transmission happens asynchronously from cmd_work.  Returns 0
 * on success or -ENOMEM if the skb cannot be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4232 
/* Send an HCI command without waiting for (or expecting) any event in
 * response.  Restricted to vendor-specific commands (OGF 0x3f), since
 * all standard commands must produce a Command Status/Complete event
 * and therefore go through hci_send_cmd()/hci_cmd_sync().  Returns 0
 * on success, -EINVAL for non-vendor opcodes or -ENOMEM.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	/* Bypass the command queue and hand the frame to the driver now */
	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
4263 
4264 /* Get data from the previously sent command */
4265 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4266 {
4267         struct hci_command_hdr *hdr;
4268 
4269         if (!hdev->sent_cmd)
4270                 return NULL;
4271 
4272         hdr = (void *) hdev->sent_cmd->data;
4273 
4274         if (hdr->opcode != cpu_to_le16(opcode))
4275                 return NULL;
4276 
4277         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4278 
4279         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4280 }
4281 
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	/* Refuse to talk to a controller that is not powered up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* Serialize against other synchronous requests; __hci_cmd_sync()
	 * must run with the request lock held.
	 */
	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
4300 
/* Send ACL data */
/* Prepend an HCI ACL header (handle+flags, payload length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before pushing the header */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and packet boundary/broadcast flags share one
	 * little-endian 16-bit field.
	 */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
4313 
/* Split an ACL SDU (head skb plus optional frag_list) into individual
 * HCI ACL packets and queue them on @queue for the TX work to send.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb keeps only its linear part; the fragments from its
	 * frag_list are queued as separate packets below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* BR/EDR controllers address packets by connection handle */
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP controllers address packets by logical channel handle */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4375 
/* Queue ACL data on @chan and kick the TX work to push it to the driver. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4386 
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header on the stack; dlen is a single byte, so no
	 * endian conversion is needed for it.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	/* Queue on the connection and let the TX work do the sending */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
4407 
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest packets
 * in flight (fairness), and compute its share of the free controller
 * buffers in *quote.  Returns NULL (and *quote = 0) when nothing is
 * ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only connections of the requested type with queued data */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		/* Fair share of the free buffers, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4470 
4471 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4472 {
4473         struct hci_conn_hash *h = &hdev->conn_hash;
4474         struct hci_conn *c;
4475 
4476         bt_dev_err(hdev, "link tx timeout");
4477 
4478         rcu_read_lock();
4479 
4480         /* Kill stalled connections */
4481         list_for_each_entry_rcu(c, &h->list, list) {
4482                 if (c->type == type && c->sent) {
4483                         bt_dev_err(hdev, "killing stalled connection %pMR",
4484                                    &c->dst);
4485                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4486                 }
4487         }
4488 
4489         rcu_read_unlock();
4490 }
4491 
/* Channel scheduler: among all connections of @type, pick the channel
 * whose head packet has the highest priority, preferring the connection
 * with the fewest packets in flight within that priority, and compute
 * its buffer quota in *quote.  Returns NULL when nothing is pending.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* The channel's priority is that of its head packet */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness tracking */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within equal priority prefer the connection with
			 * the fewest unacked packets.
			 */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer count for the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Without a dedicated LE buffer pool, share the ACL pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	/* Fair share among same-priority channels, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4573 
/* Anti-starvation pass run after a scheduling round: channels that sent
 * nothing get the priority of their head packet promoted to
 * HCI_PRIO_MAX - 1 so busier channels cannot starve them; channels that
 * did send simply have their per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: reset its counter
			 * and leave its priority alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4623 
4624 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4625 {
4626         /* Calculate count of blocks used by this packet */
4627         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4628 }
4629 
4630 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4631 {
4632         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4633                 /* ACL tx timeout must be longer than maximum
4634                  * link supervision timeout (40.9 seconds) */
4635                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4636                                        HCI_ACL_TX_TIMEOUT))
4637                         hci_link_tx_to(hdev, ACL_LINK);
4638         }
4639 }
4640 
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* NOTE(review): hdev->sco_cnt gates the loop but is never
	 * decremented here — presumably SCO traffic is not credit-accounted
	 * like ACL; confirm against the controller's flow control model.
	 */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection counter instead of letting
			 * it saturate at ~0.
			 */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4664 
4665 static void hci_sched_esco(struct hci_dev *hdev)
4666 {
4667         struct hci_conn *conn;
4668         struct sk_buff *skb;
4669         int quote;
4670 
4671         BT_DBG("%s", hdev->name);
4672 
4673         if (!hci_conn_num(hdev, ESCO_LINK))
4674                 return;
4675 
4676         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4677                                                      &quote))) {
4678                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4679                         BT_DBG("skb %p len %d", skb, skb->len);
4680                         hci_send_frame(hdev, skb);
4681 
4682                         conn->sent++;
4683                         if (conn->sent == ~0)
4684                                 conn->sent = 0;
4685                 }
4686         }
4687 }
4688 
/* Packet-based ACL scheduler: drain channel queues while the controller
 * has free ACL buffers (hdev->acl_cnt), respecting per-channel quotas
 * and packet priorities.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kill stalled links if no buffer credit returned for too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when the quota was granted */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* If anything was sent, promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4730 
4731 static void hci_sched_acl_blk(struct hci_dev *hdev)
4732 {
4733         unsigned int cnt = hdev->block_cnt;
4734         struct hci_chan *chan;
4735         struct sk_buff *skb;
4736         int quote;
4737         u8 type;
4738 
4739         __check_timeout(hdev, cnt);
4740 
4741         BT_DBG("%s", hdev->name);
4742 
4743         if (hdev->dev_type == HCI_AMP)
4744                 type = AMP_LINK;
4745         else
4746                 type = ACL_LINK;
4747 
4748         while (hdev->block_cnt > 0 &&
4749                (chan = hci_chan_sent(hdev, type, &quote))) {
4750                 u32 priority = (skb_peek(&chan->data_q))->priority;
4751                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4752                         int blocks;
4753 
4754                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4755                                skb->len, skb->priority);
4756 
4757                         /* Stop if priority has changed */
4758                         if (skb->priority < priority)
4759                                 break;
4760 
4761                         skb = skb_dequeue(&chan->data_q);
4762 
4763                         blocks = __get_blocks(hdev, skb);
4764                         if (blocks > hdev->block_cnt)
4765                                 return;
4766 
4767                         hci_conn_enter_active_mode(chan->conn,
4768                                                    bt_cb(skb)->force_active);
4769 
4770                         hci_send_frame(hdev, skb);
4771                         hdev->acl_last_tx = jiffies;
4772 
4773                         hdev->block_cnt -= blocks;
4774                         quote -= blocks;
4775 
4776                         chan->sent += blocks;
4777                         chan->conn->sent += blocks;
4778                 }
4779         }
4780 
4781         if (cnt != hdev->block_cnt)
4782                 hci_prio_recalculate(hdev, type);
4783 }
4784 
4785 static void hci_sched_acl(struct hci_dev *hdev)
4786 {
4787         BT_DBG("%s", hdev->name);
4788 
4789         /* No ACL link over BR/EDR controller */
4790         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4791                 return;
4792 
4793         /* No AMP link over AMP controller */
4794         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4795                 return;
4796 
4797         switch (hdev->flow_ctl_mode) {
4798         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4799                 hci_sched_acl_pkt(hdev);
4800                 break;
4801 
4802         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4803                 hci_sched_acl_blk(hdev);
4804                 break;
4805         }
4806 }
4807 
/* LE scheduler: uses the dedicated LE buffer pool when the controller
 * reports one (le_pkts != 0), otherwise borrows from the ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	/* NOTE(review): __check_timeout() compares against acl_last_tx even
	 * though this path updates le_last_tx below — verify this is the
	 * intended stall detection for LE-only traffic.
	 */
	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when the quota was granted */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the remaining credit back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4857 
/* TX work: run all per-type schedulers, then flush raw packets. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* In user channel mode userspace drives the device directly, so
	 * only the raw queue below is serviced.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4878 
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit header field packs the connection handle together
	 * with the packet boundary/broadcast flags.
	 */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; l2cap takes ownership of the skb */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	/* No owner for this packet: drop it */
	kfree_skb(skb);
}
4916 
4917 /* SCO data packet */
4918 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4919 {
4920         struct hci_sco_hdr *hdr = (void *) skb->data;
4921         struct hci_conn *conn;
4922         __u16 handle, flags;
4923 
4924         skb_pull(skb, HCI_SCO_HDR_SIZE);
4925 
4926         handle = __le16_to_cpu(hdr->handle);
4927         flags  = hci_flags(handle);
4928         handle = hci_handle(handle);
4929 
4930         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4931                handle, flags);
4932 
4933         hdev->stat.sco_rx++;
4934 
4935         hci_dev_lock(hdev);
4936         conn = hci_conn_hash_lookup_handle(hdev, handle);
4937         hci_dev_unlock(hdev);
4938 
4939         if (conn) {
4940                 /* Send to upper protocol */
4941                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4942                 sco_recv_scodata(conn, skb);
4943                 return;
4944         } else {
4945                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4946                            handle);
4947         }
4948 
4949         kfree_skb(skb);
4950 }
4951 
4952 static bool hci_req_is_complete(struct hci_dev *hdev)
4953 {
4954         struct sk_buff *skb;
4955 
4956         skb = skb_peek(&hdev->cmd_q);
4957         if (!skb)
4958                 return true;
4959 
4960         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4961 }
4962 
/* Re-queue a clone of the last sent command.  Used when a controller
 * spontaneously resets during init and the pending command would
 * otherwise never complete.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* Never replay HCI_Reset */
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone because hdev->sent_cmd keeps its own reference */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before later commands */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4984 
/* Called from event processing when a Command Complete/Status for
 * @opcode arrives.  Hands the request's completion callback (if any)
 * back through *req_complete / *req_complete_skb and, on failure,
 * drains the rest of the failed request from the command queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The start of the next request marks the end of this one:
		 * put it back and stop.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Pick up the completion callback from whichever queued
		 * command carries it before freeing the command.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
5049 
/* RX work: drain hdev->rx_q, mirror traffic to monitor/sockets, filter
 * by device state, then dispatch each packet by type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame.  ISO data has no handler here: it falls
		 * through to the default case and is freed.
		 */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5112 
/* Command queue worker: sends the next queued command when the
 * controller has command credit (cmd_cnt) and arms the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept copy of the last sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the response path can match the command
		 * via hci_sent_cmd_data() and resend it if needed.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* While HCI_Reset is in flight the timeout is
			 * suppressed; otherwise (re)arm it.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
5146 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp