~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

Version: ~ [ linux-6.2-rc3 ] ~ [ linux-6.1.5 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.87 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.162 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.228 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.269 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.302 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.302 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2    BlueZ - Bluetooth protocol stack for Linux
  3    Copyright (C) 2000-2001 Qualcomm Incorporated
  4    Copyright (C) 2011 ProFUSION Embedded Systems
  5 
  6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
  7 
  8    This program is free software; you can redistribute it and/or modify
  9    it under the terms of the GNU General Public License version 2 as
 10    published by the Free Software Foundation;
 11 
 12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
 16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
 17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 20 
 21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
 22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
 23    SOFTWARE IS DISCLAIMED.
 24 */
 25 
 26 /* Bluetooth HCI core. */
 27 
 28 #include <linux/export.h>
 29 #include <linux/idr.h>
 30 #include <linux/rfkill.h>
 31 #include <linux/debugfs.h>
 32 #include <linux/crypto.h>
 33 #include <linux/property.h>
 34 #include <linux/suspend.h>
 35 #include <linux/wait.h>
 36 #include <asm/unaligned.h>
 37 
 38 #include <net/bluetooth/bluetooth.h>
 39 #include <net/bluetooth/hci_core.h>
 40 #include <net/bluetooth/l2cap.h>
 41 #include <net/bluetooth/mgmt.h>
 42 
 43 #include "hci_request.h"
 44 #include "hci_debugfs.h"
 45 #include "smp.h"
 46 #include "leds.h"
 47 
 48 static void hci_rx_work(struct work_struct *work);
 49 static void hci_cmd_work(struct work_struct *work);
 50 static void hci_tx_work(struct work_struct *work);
 51 
 52 /* HCI device list */
 53 LIST_HEAD(hci_dev_list);
 54 DEFINE_RWLOCK(hci_dev_list_lock);
 55 
 56 /* HCI callback list */
 57 LIST_HEAD(hci_cb_list);
 58 DEFINE_MUTEX(hci_cb_list_lock);
 59 
 60 /* HCI ID Numbering */
 61 static DEFINE_IDA(hci_index_ida);
 62 
 63 /* ---- HCI debugfs entries ---- */
 64 
 65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
 66                              size_t count, loff_t *ppos)
 67 {
 68         struct hci_dev *hdev = file->private_data;
 69         char buf[3];
 70 
 71         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
 72         buf[1] = '\n';
 73         buf[2] = '\0';
 74         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 75 }
 76 
 77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 78                               size_t count, loff_t *ppos)
 79 {
 80         struct hci_dev *hdev = file->private_data;
 81         struct sk_buff *skb;
 82         bool enable;
 83         int err;
 84 
 85         if (!test_bit(HCI_UP, &hdev->flags))
 86                 return -ENETDOWN;
 87 
 88         err = kstrtobool_from_user(user_buf, count, &enable);
 89         if (err)
 90                 return err;
 91 
 92         if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
 93                 return -EALREADY;
 94 
 95         hci_req_sync_lock(hdev);
 96         if (enable)
 97                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
 98                                      HCI_CMD_TIMEOUT);
 99         else
100                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101                                      HCI_CMD_TIMEOUT);
102         hci_req_sync_unlock(hdev);
103 
104         if (IS_ERR(skb))
105                 return PTR_ERR(skb);
106 
107         kfree_skb(skb);
108 
109         hci_dev_change_flag(hdev, HCI_DUT_MODE);
110 
111         return count;
112 }
113 
114 static const struct file_operations dut_mode_fops = {
115         .open           = simple_open,
116         .read           = dut_mode_read,
117         .write          = dut_mode_write,
118         .llseek         = default_llseek,
119 };
120 
121 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122                                 size_t count, loff_t *ppos)
123 {
124         struct hci_dev *hdev = file->private_data;
125         char buf[3];
126 
127         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
128         buf[1] = '\n';
129         buf[2] = '\0';
130         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131 }
132 
133 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134                                  size_t count, loff_t *ppos)
135 {
136         struct hci_dev *hdev = file->private_data;
137         bool enable;
138         int err;
139 
140         err = kstrtobool_from_user(user_buf, count, &enable);
141         if (err)
142                 return err;
143 
144         /* When the diagnostic flags are not persistent and the transport
145          * is not active or in user channel operation, then there is no need
146          * for the vendor callback. Instead just store the desired value and
147          * the setting will be programmed when the controller gets powered on.
148          */
149         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
150             (!test_bit(HCI_RUNNING, &hdev->flags) ||
151              hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
152                 goto done;
153 
154         hci_req_sync_lock(hdev);
155         err = hdev->set_diag(hdev, enable);
156         hci_req_sync_unlock(hdev);
157 
158         if (err < 0)
159                 return err;
160 
161 done:
162         if (enable)
163                 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
164         else
165                 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
166 
167         return count;
168 }
169 
170 static const struct file_operations vendor_diag_fops = {
171         .open           = simple_open,
172         .read           = vendor_diag_read,
173         .write          = vendor_diag_write,
174         .llseek         = default_llseek,
175 };
176 
177 static void hci_debugfs_create_basic(struct hci_dev *hdev)
178 {
179         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
180                             &dut_mode_fops);
181 
182         if (hdev->set_diag)
183                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
184                                     &vendor_diag_fops);
185 }
186 
187 static int hci_reset_req(struct hci_request *req, unsigned long opt)
188 {
189         BT_DBG("%s %ld", req->hdev->name, opt);
190 
191         /* Reset device */
192         set_bit(HCI_RESET, &req->hdev->flags);
193         hci_req_add(req, HCI_OP_RESET, 0, NULL);
194         return 0;
195 }
196 
197 static void bredr_init(struct hci_request *req)
198 {
199         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200 
201         /* Read Local Supported Features */
202         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
203 
204         /* Read Local Version */
205         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
206 
207         /* Read BD Address */
208         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
209 }
210 
211 static void amp_init1(struct hci_request *req)
212 {
213         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
214 
215         /* Read Local Version */
216         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
217 
218         /* Read Local Supported Commands */
219         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
220 
221         /* Read Local AMP Info */
222         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
223 
224         /* Read Data Blk size */
225         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
226 
227         /* Read Flow Control Mode */
228         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
229 
230         /* Read Location Data */
231         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
232 }
233 
234 static int amp_init2(struct hci_request *req)
235 {
236         /* Read Local Supported Features. Not all AMP controllers
237          * support this so it's placed conditionally in the second
238          * stage init.
239          */
240         if (req->hdev->commands[14] & 0x20)
241                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
242 
243         return 0;
244 }
245 
246 static int hci_init1_req(struct hci_request *req, unsigned long opt)
247 {
248         struct hci_dev *hdev = req->hdev;
249 
250         BT_DBG("%s %ld", hdev->name, opt);
251 
252         /* Reset */
253         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
254                 hci_reset_req(req, 0);
255 
256         switch (hdev->dev_type) {
257         case HCI_PRIMARY:
258                 bredr_init(req);
259                 break;
260         case HCI_AMP:
261                 amp_init1(req);
262                 break;
263         default:
264                 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
265                 break;
266         }
267 
268         return 0;
269 }
270 
271 static void bredr_setup(struct hci_request *req)
272 {
273         __le16 param;
274         __u8 flt_type;
275 
276         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
277         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
278 
279         /* Read Class of Device */
280         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
281 
282         /* Read Local Name */
283         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
284 
285         /* Read Voice Setting */
286         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
287 
288         /* Read Number of Supported IAC */
289         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
290 
291         /* Read Current IAC LAP */
292         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
293 
294         /* Clear Event Filters */
295         flt_type = HCI_FLT_CLEAR_ALL;
296         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
297 
298         /* Connection accept timeout ~20 secs */
299         param = cpu_to_le16(0x7d00);
300         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
301 }
302 
303 static void le_setup(struct hci_request *req)
304 {
305         struct hci_dev *hdev = req->hdev;
306 
307         /* Read LE Buffer Size */
308         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
309 
310         /* Read LE Local Supported Features */
311         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
312 
313         /* Read LE Supported States */
314         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
315 
316         /* LE-only controllers have LE implicitly enabled */
317         if (!lmp_bredr_capable(hdev))
318                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
319 }
320 
321 static void hci_setup_event_mask(struct hci_request *req)
322 {
323         struct hci_dev *hdev = req->hdev;
324 
325         /* The second byte is 0xff instead of 0x9f (two reserved bits
326          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
327          * command otherwise.
328          */
329         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
330 
331         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
332          * any event mask for pre 1.2 devices.
333          */
334         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
335                 return;
336 
337         if (lmp_bredr_capable(hdev)) {
338                 events[4] |= 0x01; /* Flow Specification Complete */
339         } else {
340                 /* Use a different default for LE-only devices */
341                 memset(events, 0, sizeof(events));
342                 events[1] |= 0x20; /* Command Complete */
343                 events[1] |= 0x40; /* Command Status */
344                 events[1] |= 0x80; /* Hardware Error */
345 
346                 /* If the controller supports the Disconnect command, enable
347                  * the corresponding event. In addition enable packet flow
348                  * control related events.
349                  */
350                 if (hdev->commands[0] & 0x20) {
351                         events[0] |= 0x10; /* Disconnection Complete */
352                         events[2] |= 0x04; /* Number of Completed Packets */
353                         events[3] |= 0x02; /* Data Buffer Overflow */
354                 }
355 
356                 /* If the controller supports the Read Remote Version
357                  * Information command, enable the corresponding event.
358                  */
359                 if (hdev->commands[2] & 0x80)
360                         events[1] |= 0x08; /* Read Remote Version Information
361                                             * Complete
362                                             */
363 
364                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
365                         events[0] |= 0x80; /* Encryption Change */
366                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
367                 }
368         }
369 
370         if (lmp_inq_rssi_capable(hdev) ||
371             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
372                 events[4] |= 0x02; /* Inquiry Result with RSSI */
373 
374         if (lmp_ext_feat_capable(hdev))
375                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
376 
377         if (lmp_esco_capable(hdev)) {
378                 events[5] |= 0x08; /* Synchronous Connection Complete */
379                 events[5] |= 0x10; /* Synchronous Connection Changed */
380         }
381 
382         if (lmp_sniffsubr_capable(hdev))
383                 events[5] |= 0x20; /* Sniff Subrating */
384 
385         if (lmp_pause_enc_capable(hdev))
386                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
387 
388         if (lmp_ext_inq_capable(hdev))
389                 events[5] |= 0x40; /* Extended Inquiry Result */
390 
391         if (lmp_no_flush_capable(hdev))
392                 events[7] |= 0x01; /* Enhanced Flush Complete */
393 
394         if (lmp_lsto_capable(hdev))
395                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
396 
397         if (lmp_ssp_capable(hdev)) {
398                 events[6] |= 0x01;      /* IO Capability Request */
399                 events[6] |= 0x02;      /* IO Capability Response */
400                 events[6] |= 0x04;      /* User Confirmation Request */
401                 events[6] |= 0x08;      /* User Passkey Request */
402                 events[6] |= 0x10;      /* Remote OOB Data Request */
403                 events[6] |= 0x20;      /* Simple Pairing Complete */
404                 events[7] |= 0x04;      /* User Passkey Notification */
405                 events[7] |= 0x08;      /* Keypress Notification */
406                 events[7] |= 0x10;      /* Remote Host Supported
407                                          * Features Notification
408                                          */
409         }
410 
411         if (lmp_le_capable(hdev))
412                 events[7] |= 0x20;      /* LE Meta-Event */
413 
414         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
415 }
416 
417 static int hci_init2_req(struct hci_request *req, unsigned long opt)
418 {
419         struct hci_dev *hdev = req->hdev;
420 
421         if (hdev->dev_type == HCI_AMP)
422                 return amp_init2(req);
423 
424         if (lmp_bredr_capable(hdev))
425                 bredr_setup(req);
426         else
427                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
428 
429         if (lmp_le_capable(hdev))
430                 le_setup(req);
431 
432         /* All Bluetooth 1.2 and later controllers should support the
433          * HCI command for reading the local supported commands.
434          *
435          * Unfortunately some controllers indicate Bluetooth 1.2 support,
436          * but do not have support for this command. If that is the case,
437          * the driver can quirk the behavior and skip reading the local
438          * supported commands.
439          */
440         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
441             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
442                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
443 
444         if (lmp_ssp_capable(hdev)) {
445                 /* When SSP is available, then the host features page
446                  * should also be available as well. However some
447                  * controllers list the max_page as 0 as long as SSP
448                  * has not been enabled. To achieve proper debugging
449                  * output, force the minimum max_page to 1 at least.
450                  */
451                 hdev->max_page = 0x01;
452 
453                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
454                         u8 mode = 0x01;
455 
456                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
457                                     sizeof(mode), &mode);
458                 } else {
459                         struct hci_cp_write_eir cp;
460 
461                         memset(hdev->eir, 0, sizeof(hdev->eir));
462                         memset(&cp, 0, sizeof(cp));
463 
464                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
465                 }
466         }
467 
468         if (lmp_inq_rssi_capable(hdev) ||
469             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
470                 u8 mode;
471 
472                 /* If Extended Inquiry Result events are supported, then
473                  * they are clearly preferred over Inquiry Result with RSSI
474                  * events.
475                  */
476                 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
477 
478                 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
479         }
480 
481         if (lmp_inq_tx_pwr_capable(hdev))
482                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
483 
484         if (lmp_ext_feat_capable(hdev)) {
485                 struct hci_cp_read_local_ext_features cp;
486 
487                 cp.page = 0x01;
488                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
489                             sizeof(cp), &cp);
490         }
491 
492         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
493                 u8 enable = 1;
494                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
495                             &enable);
496         }
497 
498         return 0;
499 }
500 
501 static void hci_setup_link_policy(struct hci_request *req)
502 {
503         struct hci_dev *hdev = req->hdev;
504         struct hci_cp_write_def_link_policy cp;
505         u16 link_policy = 0;
506 
507         if (lmp_rswitch_capable(hdev))
508                 link_policy |= HCI_LP_RSWITCH;
509         if (lmp_hold_capable(hdev))
510                 link_policy |= HCI_LP_HOLD;
511         if (lmp_sniff_capable(hdev))
512                 link_policy |= HCI_LP_SNIFF;
513         if (lmp_park_capable(hdev))
514                 link_policy |= HCI_LP_PARK;
515 
516         cp.policy = cpu_to_le16(link_policy);
517         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
518 }
519 
520 static void hci_set_le_support(struct hci_request *req)
521 {
522         struct hci_dev *hdev = req->hdev;
523         struct hci_cp_write_le_host_supported cp;
524 
525         /* LE-only devices do not support explicit enablement */
526         if (!lmp_bredr_capable(hdev))
527                 return;
528 
529         memset(&cp, 0, sizeof(cp));
530 
531         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
532                 cp.le = 0x01;
533                 cp.simul = 0x00;
534         }
535 
536         if (cp.le != lmp_host_le_capable(hdev))
537                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538                             &cp);
539 }
540 
541 static void hci_set_event_mask_page_2(struct hci_request *req)
542 {
543         struct hci_dev *hdev = req->hdev;
544         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545         bool changed = false;
546 
547         /* If Connectionless Slave Broadcast master role is supported
548          * enable all necessary events for it.
549          */
550         if (lmp_csb_master_capable(hdev)) {
551                 events[1] |= 0x40;      /* Triggered Clock Capture */
552                 events[1] |= 0x80;      /* Synchronization Train Complete */
553                 events[2] |= 0x10;      /* Slave Page Response Timeout */
554                 events[2] |= 0x20;      /* CSB Channel Map Change */
555                 changed = true;
556         }
557 
558         /* If Connectionless Slave Broadcast slave role is supported
559          * enable all necessary events for it.
560          */
561         if (lmp_csb_slave_capable(hdev)) {
562                 events[2] |= 0x01;      /* Synchronization Train Received */
563                 events[2] |= 0x02;      /* CSB Receive */
564                 events[2] |= 0x04;      /* CSB Timeout */
565                 events[2] |= 0x08;      /* Truncated Page Complete */
566                 changed = true;
567         }
568 
569         /* Enable Authenticated Payload Timeout Expired event if supported */
570         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
571                 events[2] |= 0x80;
572                 changed = true;
573         }
574 
575         /* Some Broadcom based controllers indicate support for Set Event
576          * Mask Page 2 command, but then actually do not support it. Since
577          * the default value is all bits set to zero, the command is only
578          * required if the event mask has to be changed. In case no change
579          * to the event mask is needed, skip this command.
580          */
581         if (changed)
582                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
583                             sizeof(events), events);
584 }
585 
586 static int hci_init3_req(struct hci_request *req, unsigned long opt)
587 {
588         struct hci_dev *hdev = req->hdev;
589         u8 p;
590 
591         hci_setup_event_mask(req);
592 
593         if (hdev->commands[6] & 0x20 &&
594             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
595                 struct hci_cp_read_stored_link_key cp;
596 
597                 bacpy(&cp.bdaddr, BDADDR_ANY);
598                 cp.read_all = 0x01;
599                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
600         }
601 
602         if (hdev->commands[5] & 0x10)
603                 hci_setup_link_policy(req);
604 
605         if (hdev->commands[8] & 0x01)
606                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
607 
608         if (hdev->commands[18] & 0x04 &&
609             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
610                 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
611 
612         /* Some older Broadcom based Bluetooth 1.2 controllers do not
613          * support the Read Page Scan Type command. Check support for
614          * this command in the bit mask of supported commands.
615          */
616         if (hdev->commands[13] & 0x01)
617                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
618 
619         if (lmp_le_capable(hdev)) {
620                 u8 events[8];
621 
622                 memset(events, 0, sizeof(events));
623 
624                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625                         events[0] |= 0x10;      /* LE Long Term Key Request */
626 
627                 /* If controller supports the Connection Parameters Request
628                  * Link Layer Procedure, enable the corresponding event.
629                  */
630                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631                         events[0] |= 0x20;      /* LE Remote Connection
632                                                  * Parameter Request
633                                                  */
634 
635                 /* If the controller supports the Data Length Extension
636                  * feature, enable the corresponding event.
637                  */
638                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639                         events[0] |= 0x40;      /* LE Data Length Change */
640 
641                 /* If the controller supports Extended Scanner Filter
642                  * Policies, enable the correspondig event.
643                  */
644                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
645                         events[1] |= 0x04;      /* LE Direct Advertising
646                                                  * Report
647                                                  */
648 
649                 /* If the controller supports Channel Selection Algorithm #2
650                  * feature, enable the corresponding event.
651                  */
652                 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
653                         events[2] |= 0x08;      /* LE Channel Selection
654                                                  * Algorithm
655                                                  */
656 
657                 /* If the controller supports the LE Set Scan Enable command,
658                  * enable the corresponding advertising report event.
659                  */
660                 if (hdev->commands[26] & 0x08)
661                         events[0] |= 0x02;      /* LE Advertising Report */
662 
663                 /* If the controller supports the LE Create Connection
664                  * command, enable the corresponding event.
665                  */
666                 if (hdev->commands[26] & 0x10)
667                         events[0] |= 0x01;      /* LE Connection Complete */
668 
669                 /* If the controller supports the LE Connection Update
670                  * command, enable the corresponding event.
671                  */
672                 if (hdev->commands[27] & 0x04)
673                         events[0] |= 0x04;      /* LE Connection Update
674                                                  * Complete
675                                                  */
676 
677                 /* If the controller supports the LE Read Remote Used Features
678                  * command, enable the corresponding event.
679                  */
680                 if (hdev->commands[27] & 0x20)
681                         events[0] |= 0x08;      /* LE Read Remote Used
682                                                  * Features Complete
683                                                  */
684 
685                 /* If the controller supports the LE Read Local P-256
686                  * Public Key command, enable the corresponding event.
687                  */
688                 if (hdev->commands[34] & 0x02)
689                         events[0] |= 0x80;      /* LE Read Local P-256
690                                                  * Public Key Complete
691                                                  */
692 
693                 /* If the controller supports the LE Generate DHKey
694                  * command, enable the corresponding event.
695                  */
696                 if (hdev->commands[34] & 0x04)
697                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
698 
699                 /* If the controller supports the LE Set Default PHY or
700                  * LE Set PHY commands, enable the corresponding event.
701                  */
702                 if (hdev->commands[35] & (0x20 | 0x40))
703                         events[1] |= 0x08;        /* LE PHY Update Complete */
704 
705                 /* If the controller supports LE Set Extended Scan Parameters
706                  * and LE Set Extended Scan Enable commands, enable the
707                  * corresponding event.
708                  */
709                 if (use_ext_scan(hdev))
710                         events[1] |= 0x10;      /* LE Extended Advertising
711                                                  * Report
712                                                  */
713 
714                 /* If the controller supports the LE Extended Create Connection
715                  * command, enable the corresponding event.
716                  */
717                 if (use_ext_conn(hdev))
718                         events[1] |= 0x02;      /* LE Enhanced Connection
719                                                  * Complete
720                                                  */
721 
722                 /* If the controller supports the LE Extended Advertising
723                  * command, enable the corresponding event.
724                  */
725                 if (ext_adv_capable(hdev))
726                         events[2] |= 0x02;      /* LE Advertising Set
727                                                  * Terminated
728                                                  */
729 
730                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
731                             events);
732 
733                 /* Read LE Advertising Channel TX Power */
734                 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735                         /* HCI TS spec forbids mixing of legacy and extended
736                          * advertising commands wherein READ_ADV_TX_POWER is
737                          * also included. So do not call it if extended adv
738                          * is supported otherwise controller will return
739                          * COMMAND_DISALLOWED for extended commands.
740                          */
741                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
742                 }
743 
744                 if (hdev->commands[26] & 0x40) {
745                         /* Read LE White List Size */
746                         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
747                                     0, NULL);
748                 }
749 
750                 if (hdev->commands[26] & 0x80) {
751                         /* Clear LE White List */
752                         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
753                 }
754 
755                 if (hdev->commands[34] & 0x40) {
756                         /* Read LE Resolving List Size */
757                         hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
758                                     0, NULL);
759                 }
760 
761                 if (hdev->commands[34] & 0x20) {
762                         /* Clear LE Resolving List */
763                         hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
764                 }
765 
766                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
767                         /* Read LE Maximum Data Length */
768                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
769 
770                         /* Read LE Suggested Default Data Length */
771                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
772                 }
773 
774                 if (ext_adv_capable(hdev)) {
775                         /* Read LE Number of Supported Advertising Sets */
776                         hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
777                                     0, NULL);
778                 }
779 
780                 hci_set_le_support(req);
781         }
782 
783         /* Read features beyond page 1 if available */
784         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
785                 struct hci_cp_read_local_ext_features cp;
786 
787                 cp.page = p;
788                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
789                             sizeof(cp), &cp);
790         }
791 
792         return 0;
793 }
794 
795 static int hci_init4_req(struct hci_request *req, unsigned long opt)
796 {
797         struct hci_dev *hdev = req->hdev;
798 
799         /* Some Broadcom based Bluetooth controllers do not support the
800          * Delete Stored Link Key command. They are clearly indicating its
801          * absence in the bit mask of supported commands.
802          *
803          * Check the supported commands and only if the the command is marked
804          * as supported send it. If not supported assume that the controller
805          * does not have actual support for stored link keys which makes this
806          * command redundant anyway.
807          *
808          * Some controllers indicate that they support handling deleting
809          * stored link keys, but they don't. The quirk lets a driver
810          * just disable this command.
811          */
812         if (hdev->commands[6] & 0x80 &&
813             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
814                 struct hci_cp_delete_stored_link_key cp;
815 
816                 bacpy(&cp.bdaddr, BDADDR_ANY);
817                 cp.delete_all = 0x01;
818                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
819                             sizeof(cp), &cp);
820         }
821 
822         /* Set event mask page 2 if the HCI command for it is supported */
823         if (hdev->commands[22] & 0x04)
824                 hci_set_event_mask_page_2(req);
825 
826         /* Read local codec list if the HCI command is supported */
827         if (hdev->commands[29] & 0x20)
828                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
829 
830         /* Get MWS transport configuration if the HCI command is supported */
831         if (hdev->commands[30] & 0x08)
832                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
833 
834         /* Check for Synchronization Train support */
835         if (lmp_sync_train_capable(hdev))
836                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
837 
838         /* Enable Secure Connections if supported and configured */
839         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
840             bredr_sc_enabled(hdev)) {
841                 u8 support = 0x01;
842 
843                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
844                             sizeof(support), &support);
845         }
846 
847         /* Set erroneous data reporting if supported to the wideband speech
848          * setting value
849          */
850         if (hdev->commands[18] & 0x08 &&
851             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
852                 bool enabled = hci_dev_test_flag(hdev,
853                                                  HCI_WIDEBAND_SPEECH_ENABLED);
854 
855                 if (enabled !=
856                     (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
857                         struct hci_cp_write_def_err_data_reporting cp;
858 
859                         cp.err_data_reporting = enabled ?
860                                                 ERR_DATA_REPORTING_ENABLED :
861                                                 ERR_DATA_REPORTING_DISABLED;
862 
863                         hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
864                                     sizeof(cp), &cp);
865                 }
866         }
867 
868         /* Set Suggested Default Data Length to maximum if supported */
869         if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
870                 struct hci_cp_le_write_def_data_len cp;
871 
872                 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
873                 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
874                 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
875         }
876 
877         /* Set Default PHY parameters if command is supported */
878         if (hdev->commands[35] & 0x20) {
879                 struct hci_cp_le_set_default_phy cp;
880 
881                 cp.all_phys = 0x00;
882                 cp.tx_phys = hdev->le_tx_def_phys;
883                 cp.rx_phys = hdev->le_rx_def_phys;
884 
885                 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
886         }
887 
888         return 0;
889 }
890 
891 static int __hci_init(struct hci_dev *hdev)
892 {
893         int err;
894 
895         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
896         if (err < 0)
897                 return err;
898 
899         if (hci_dev_test_flag(hdev, HCI_SETUP))
900                 hci_debugfs_create_basic(hdev);
901 
902         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
903         if (err < 0)
904                 return err;
905 
906         /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
907          * BR/EDR/LE type controllers. AMP controllers only need the
908          * first two stages of init.
909          */
910         if (hdev->dev_type != HCI_PRIMARY)
911                 return 0;
912 
913         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
914         if (err < 0)
915                 return err;
916 
917         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
918         if (err < 0)
919                 return err;
920 
921         /* This function is only called when the controller is actually in
922          * configured state. When the controller is marked as unconfigured,
923          * this initialization procedure is not run.
924          *
925          * It means that it is possible that a controller runs through its
926          * setup phase and then discovers missing settings. If that is the
927          * case, then this function will not be called. It then will only
928          * be called during the config phase.
929          *
930          * So only when in setup phase or config phase, create the debugfs
931          * entries and register the SMP channels.
932          */
933         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
934             !hci_dev_test_flag(hdev, HCI_CONFIG))
935                 return 0;
936 
937         hci_debugfs_create_common(hdev);
938 
939         if (lmp_bredr_capable(hdev))
940                 hci_debugfs_create_bredr(hdev);
941 
942         if (lmp_le_capable(hdev))
943                 hci_debugfs_create_le(hdev);
944 
945         return 0;
946 }
947 
948 static int hci_init0_req(struct hci_request *req, unsigned long opt)
949 {
950         struct hci_dev *hdev = req->hdev;
951 
952         BT_DBG("%s %ld", hdev->name, opt);
953 
954         /* Reset */
955         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
956                 hci_reset_req(req, 0);
957 
958         /* Read Local Version */
959         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
960 
961         /* Read BD Address */
962         if (hdev->set_bdaddr)
963                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
964 
965         return 0;
966 }
967 
968 static int __hci_unconf_init(struct hci_dev *hdev)
969 {
970         int err;
971 
972         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
973                 return 0;
974 
975         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
976         if (err < 0)
977                 return err;
978 
979         if (hci_dev_test_flag(hdev, HCI_SETUP))
980                 hci_debugfs_create_basic(hdev);
981 
982         return 0;
983 }
984 
985 static int hci_scan_req(struct hci_request *req, unsigned long opt)
986 {
987         __u8 scan = opt;
988 
989         BT_DBG("%s %x", req->hdev->name, scan);
990 
991         /* Inquiry and Page scans */
992         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
993         return 0;
994 }
995 
996 static int hci_auth_req(struct hci_request *req, unsigned long opt)
997 {
998         __u8 auth = opt;
999 
1000         BT_DBG("%s %x", req->hdev->name, auth);
1001 
1002         /* Authentication */
1003         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1004         return 0;
1005 }
1006 
1007 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1008 {
1009         __u8 encrypt = opt;
1010 
1011         BT_DBG("%s %x", req->hdev->name, encrypt);
1012 
1013         /* Encryption */
1014         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1015         return 0;
1016 }
1017 
1018 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1019 {
1020         __le16 policy = cpu_to_le16(opt);
1021 
1022         BT_DBG("%s %x", req->hdev->name, policy);
1023 
1024         /* Default link policy */
1025         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1026         return 0;
1027 }
1028 
1029 /* Get HCI device by index.
1030  * Device is held on return. */
1031 struct hci_dev *hci_dev_get(int index)
1032 {
1033         struct hci_dev *hdev = NULL, *d;
1034 
1035         BT_DBG("%d", index);
1036 
1037         if (index < 0)
1038                 return NULL;
1039 
1040         read_lock(&hci_dev_list_lock);
1041         list_for_each_entry(d, &hci_dev_list, list) {
1042                 if (d->id == index) {
1043                         hdev = hci_dev_hold(d);
1044                         break;
1045                 }
1046         }
1047         read_unlock(&hci_dev_list_lock);
1048         return hdev;
1049 }
1050 
1051 /* ---- Inquiry support ---- */
1052 
1053 bool hci_discovery_active(struct hci_dev *hdev)
1054 {
1055         struct discovery_state *discov = &hdev->discovery;
1056 
1057         switch (discov->state) {
1058         case DISCOVERY_FINDING:
1059         case DISCOVERY_RESOLVING:
1060                 return true;
1061 
1062         default:
1063                 return false;
1064         }
1065 }
1066 
1067 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1068 {
1069         int old_state = hdev->discovery.state;
1070 
1071         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1072 
1073         if (old_state == state)
1074                 return;
1075 
1076         hdev->discovery.state = state;
1077 
1078         switch (state) {
1079         case DISCOVERY_STOPPED:
1080                 hci_update_background_scan(hdev);
1081 
1082                 if (old_state != DISCOVERY_STARTING)
1083                         mgmt_discovering(hdev, 0);
1084                 break;
1085         case DISCOVERY_STARTING:
1086                 break;
1087         case DISCOVERY_FINDING:
1088                 mgmt_discovering(hdev, 1);
1089                 break;
1090         case DISCOVERY_RESOLVING:
1091                 break;
1092         case DISCOVERY_STOPPING:
1093                 break;
1094         }
1095 }
1096 
1097 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1098 {
1099         struct discovery_state *cache = &hdev->discovery;
1100         struct inquiry_entry *p, *n;
1101 
1102         list_for_each_entry_safe(p, n, &cache->all, all) {
1103                 list_del(&p->all);
1104                 kfree(p);
1105         }
1106 
1107         INIT_LIST_HEAD(&cache->unknown);
1108         INIT_LIST_HEAD(&cache->resolve);
1109 }
1110 
1111 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1112                                                bdaddr_t *bdaddr)
1113 {
1114         struct discovery_state *cache = &hdev->discovery;
1115         struct inquiry_entry *e;
1116 
1117         BT_DBG("cache %p, %pMR", cache, bdaddr);
1118 
1119         list_for_each_entry(e, &cache->all, all) {
1120                 if (!bacmp(&e->data.bdaddr, bdaddr))
1121                         return e;
1122         }
1123 
1124         return NULL;
1125 }
1126 
1127 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1128                                                        bdaddr_t *bdaddr)
1129 {
1130         struct discovery_state *cache = &hdev->discovery;
1131         struct inquiry_entry *e;
1132 
1133         BT_DBG("cache %p, %pMR", cache, bdaddr);
1134 
1135         list_for_each_entry(e, &cache->unknown, list) {
1136                 if (!bacmp(&e->data.bdaddr, bdaddr))
1137                         return e;
1138         }
1139 
1140         return NULL;
1141 }
1142 
1143 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1144                                                        bdaddr_t *bdaddr,
1145                                                        int state)
1146 {
1147         struct discovery_state *cache = &hdev->discovery;
1148         struct inquiry_entry *e;
1149 
1150         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1151 
1152         list_for_each_entry(e, &cache->resolve, list) {
1153                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1154                         return e;
1155                 if (!bacmp(&e->data.bdaddr, bdaddr))
1156                         return e;
1157         }
1158 
1159         return NULL;
1160 }
1161 
1162 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1163                                       struct inquiry_entry *ie)
1164 {
1165         struct discovery_state *cache = &hdev->discovery;
1166         struct list_head *pos = &cache->resolve;
1167         struct inquiry_entry *p;
1168 
1169         list_del(&ie->list);
1170 
1171         list_for_each_entry(p, &cache->resolve, list) {
1172                 if (p->name_state != NAME_PENDING &&
1173                     abs(p->data.rssi) >= abs(ie->data.rssi))
1174                         break;
1175                 pos = &p->list;
1176         }
1177 
1178         list_add(&ie->list, pos);
1179 }
1180 
1181 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1182                              bool name_known)
1183 {
1184         struct discovery_state *cache = &hdev->discovery;
1185         struct inquiry_entry *ie;
1186         u32 flags = 0;
1187 
1188         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1189 
1190         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1191 
1192         if (!data->ssp_mode)
1193                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1194 
1195         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1196         if (ie) {
1197                 if (!ie->data.ssp_mode)
1198                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1199 
1200                 if (ie->name_state == NAME_NEEDED &&
1201                     data->rssi != ie->data.rssi) {
1202                         ie->data.rssi = data->rssi;
1203                         hci_inquiry_cache_update_resolve(hdev, ie);
1204                 }
1205 
1206                 goto update;
1207         }
1208 
1209         /* Entry not in the cache. Add new one. */
1210         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1211         if (!ie) {
1212                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1213                 goto done;
1214         }
1215 
1216         list_add(&ie->all, &cache->all);
1217 
1218         if (name_known) {
1219                 ie->name_state = NAME_KNOWN;
1220         } else {
1221                 ie->name_state = NAME_NOT_KNOWN;
1222                 list_add(&ie->list, &cache->unknown);
1223         }
1224 
1225 update:
1226         if (name_known && ie->name_state != NAME_KNOWN &&
1227             ie->name_state != NAME_PENDING) {
1228                 ie->name_state = NAME_KNOWN;
1229                 list_del(&ie->list);
1230         }
1231 
1232         memcpy(&ie->data, data, sizeof(*data));
1233         ie->timestamp = jiffies;
1234         cache->timestamp = jiffies;
1235 
1236         if (ie->name_state == NAME_NOT_KNOWN)
1237                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1238 
1239 done:
1240         return flags;
1241 }
1242 
1243 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1244 {
1245         struct discovery_state *cache = &hdev->discovery;
1246         struct inquiry_info *info = (struct inquiry_info *) buf;
1247         struct inquiry_entry *e;
1248         int copied = 0;
1249 
1250         list_for_each_entry(e, &cache->all, all) {
1251                 struct inquiry_data *data = &e->data;
1252 
1253                 if (copied >= num)
1254                         break;
1255 
1256                 bacpy(&info->bdaddr, &data->bdaddr);
1257                 info->pscan_rep_mode    = data->pscan_rep_mode;
1258                 info->pscan_period_mode = data->pscan_period_mode;
1259                 info->pscan_mode        = data->pscan_mode;
1260                 memcpy(info->dev_class, data->dev_class, 3);
1261                 info->clock_offset      = data->clock_offset;
1262 
1263                 info++;
1264                 copied++;
1265         }
1266 
1267         BT_DBG("cache %p, copied %d", cache, copied);
1268         return copied;
1269 }
1270 
1271 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1272 {
1273         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1274         struct hci_dev *hdev = req->hdev;
1275         struct hci_cp_inquiry cp;
1276 
1277         BT_DBG("%s", hdev->name);
1278 
1279         if (test_bit(HCI_INQUIRY, &hdev->flags))
1280                 return 0;
1281 
1282         /* Start Inquiry */
1283         memcpy(&cp.lap, &ir->lap, 3);
1284         cp.length  = ir->length;
1285         cp.num_rsp = ir->num_rsp;
1286         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1287 
1288         return 0;
1289 }
1290 
1291 int hci_inquiry(void __user *arg)
1292 {
1293         __u8 __user *ptr = arg;
1294         struct hci_inquiry_req ir;
1295         struct hci_dev *hdev;
1296         int err = 0, do_inquiry = 0, max_rsp;
1297         long timeo;
1298         __u8 *buf;
1299 
1300         if (copy_from_user(&ir, ptr, sizeof(ir)))
1301                 return -EFAULT;
1302 
1303         hdev = hci_dev_get(ir.dev_id);
1304         if (!hdev)
1305                 return -ENODEV;
1306 
1307         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1308                 err = -EBUSY;
1309                 goto done;
1310         }
1311 
1312         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1313                 err = -EOPNOTSUPP;
1314                 goto done;
1315         }
1316 
1317         if (hdev->dev_type != HCI_PRIMARY) {
1318                 err = -EOPNOTSUPP;
1319                 goto done;
1320         }
1321 
1322         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1323                 err = -EOPNOTSUPP;
1324                 goto done;
1325         }
1326 
1327         hci_dev_lock(hdev);
1328         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1329             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1330                 hci_inquiry_cache_flush(hdev);
1331                 do_inquiry = 1;
1332         }
1333         hci_dev_unlock(hdev);
1334 
1335         timeo = ir.length * msecs_to_jiffies(2000);
1336 
1337         if (do_inquiry) {
1338                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1339                                    timeo, NULL);
1340                 if (err < 0)
1341                         goto done;
1342 
1343                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1344                  * cleared). If it is interrupted by a signal, return -EINTR.
1345                  */
1346                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1347                                 TASK_INTERRUPTIBLE))
1348                         return -EINTR;
1349         }
1350 
1351         /* for unlimited number of responses we will use buffer with
1352          * 255 entries
1353          */
1354         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1355 
1356         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1357          * copy it to the user space.
1358          */
1359         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1360         if (!buf) {
1361                 err = -ENOMEM;
1362                 goto done;
1363         }
1364 
1365         hci_dev_lock(hdev);
1366         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1367         hci_dev_unlock(hdev);
1368 
1369         BT_DBG("num_rsp %d", ir.num_rsp);
1370 
1371         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1372                 ptr += sizeof(ir);
1373                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1374                                  ir.num_rsp))
1375                         err = -EFAULT;
1376         } else
1377                 err = -EFAULT;
1378 
1379         kfree(buf);
1380 
1381 done:
1382         hci_dev_put(hdev);
1383         return err;
1384 }
1385 
1386 /**
1387  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1388  *                                     (BD_ADDR) for a HCI device from
1389  *                                     a firmware node property.
1390  * @hdev:       The HCI device
1391  *
1392  * Search the firmware node for 'local-bd-address'.
1393  *
1394  * All-zero BD addresses are rejected, because those could be properties
1395  * that exist in the firmware tables, but were not updated by the firmware. For
1396  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1397  */
1398 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1399 {
1400         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1401         bdaddr_t ba;
1402         int ret;
1403 
1404         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1405                                             (u8 *)&ba, sizeof(ba));
1406         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1407                 return;
1408 
1409         bacpy(&hdev->public_addr, &ba);
1410 }
1411 
1412 static int hci_dev_do_open(struct hci_dev *hdev)
1413 {
1414         int ret = 0;
1415 
1416         BT_DBG("%s %p", hdev->name, hdev);
1417 
1418         hci_req_sync_lock(hdev);
1419 
1420         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1421                 ret = -ENODEV;
1422                 goto done;
1423         }
1424 
1425         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1426             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1427                 /* Check for rfkill but allow the HCI setup stage to
1428                  * proceed (which in itself doesn't cause any RF activity).
1429                  */
1430                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1431                         ret = -ERFKILL;
1432                         goto done;
1433                 }
1434 
1435                 /* Check for valid public address or a configured static
1436                  * random adddress, but let the HCI setup proceed to
1437                  * be able to determine if there is a public address
1438                  * or not.
1439                  *
1440                  * In case of user channel usage, it is not important
1441                  * if a public address or static random address is
1442                  * available.
1443                  *
1444                  * This check is only valid for BR/EDR controllers
1445                  * since AMP controllers do not have an address.
1446                  */
1447                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1448                     hdev->dev_type == HCI_PRIMARY &&
1449                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1450                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1451                         ret = -EADDRNOTAVAIL;
1452                         goto done;
1453                 }
1454         }
1455 
1456         if (test_bit(HCI_UP, &hdev->flags)) {
1457                 ret = -EALREADY;
1458                 goto done;
1459         }
1460 
1461         if (hdev->open(hdev)) {
1462                 ret = -EIO;
1463                 goto done;
1464         }
1465 
1466         set_bit(HCI_RUNNING, &hdev->flags);
1467         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1468 
1469         atomic_set(&hdev->cmd_cnt, 1);
1470         set_bit(HCI_INIT, &hdev->flags);
1471 
1472         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1473             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1474                 bool invalid_bdaddr;
1475 
1476                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1477 
1478                 if (hdev->setup)
1479                         ret = hdev->setup(hdev);
1480 
1481                 /* The transport driver can set the quirk to mark the
1482                  * BD_ADDR invalid before creating the HCI device or in
1483                  * its setup callback.
1484                  */
1485                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1486                                           &hdev->quirks);
1487 
1488                 if (ret)
1489                         goto setup_failed;
1490 
1491                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1492                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1493                                 hci_dev_get_bd_addr_from_property(hdev);
1494 
1495                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1496                             hdev->set_bdaddr) {
1497                                 ret = hdev->set_bdaddr(hdev,
1498                                                        &hdev->public_addr);
1499 
1500                                 /* If setting of the BD_ADDR from the device
1501                                  * property succeeds, then treat the address
1502                                  * as valid even if the invalid BD_ADDR
1503                                  * quirk indicates otherwise.
1504                                  */
1505                                 if (!ret)
1506                                         invalid_bdaddr = false;
1507                         }
1508                 }
1509 
1510 setup_failed:
1511                 /* The transport driver can set these quirks before
1512                  * creating the HCI device or in its setup callback.
1513                  *
1514                  * For the invalid BD_ADDR quirk it is possible that
1515                  * it becomes a valid address if the bootloader does
1516                  * provide it (see above).
1517                  *
1518                  * In case any of them is set, the controller has to
1519                  * start up as unconfigured.
1520                  */
1521                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1522                     invalid_bdaddr)
1523                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1524 
1525                 /* For an unconfigured controller it is required to
1526                  * read at least the version information provided by
1527                  * the Read Local Version Information command.
1528                  *
1529                  * If the set_bdaddr driver callback is provided, then
1530                  * also the original Bluetooth public device address
1531                  * will be read using the Read BD Address command.
1532                  */
1533                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1534                         ret = __hci_unconf_init(hdev);
1535         }
1536 
1537         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1538                 /* If public address change is configured, ensure that
1539                  * the address gets programmed. If the driver does not
1540                  * support changing the public address, fail the power
1541                  * on procedure.
1542                  */
1543                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1544                     hdev->set_bdaddr)
1545                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1546                 else
1547                         ret = -EADDRNOTAVAIL;
1548         }
1549 
1550         if (!ret) {
1551                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1552                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1553                         ret = __hci_init(hdev);
1554                         if (!ret && hdev->post_init)
1555                                 ret = hdev->post_init(hdev);
1556                 }
1557         }
1558 
1559         /* If the HCI Reset command is clearing all diagnostic settings,
1560          * then they need to be reprogrammed after the init procedure
1561          * completed.
1562          */
1563         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1564             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1565             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1566                 ret = hdev->set_diag(hdev, true);
1567 
1568         clear_bit(HCI_INIT, &hdev->flags);
1569 
1570         if (!ret) {
1571                 hci_dev_hold(hdev);
1572                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1573                 hci_adv_instances_set_rpa_expired(hdev, true);
1574                 set_bit(HCI_UP, &hdev->flags);
1575                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1576                 hci_leds_update_powered(hdev, true);
1577                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1578                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1579                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1580                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1581                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1582                     hdev->dev_type == HCI_PRIMARY) {
1583                         ret = __hci_req_hci_power_on(hdev);
1584                         mgmt_power_on(hdev, ret);
1585                 }
1586         } else {
1587                 /* Init failed, cleanup */
1588                 flush_work(&hdev->tx_work);
1589                 flush_work(&hdev->cmd_work);
1590                 flush_work(&hdev->rx_work);
1591 
1592                 skb_queue_purge(&hdev->cmd_q);
1593                 skb_queue_purge(&hdev->rx_q);
1594 
1595                 if (hdev->flush)
1596                         hdev->flush(hdev);
1597 
1598                 if (hdev->sent_cmd) {
1599                         kfree_skb(hdev->sent_cmd);
1600                         hdev->sent_cmd = NULL;
1601                 }
1602 
1603                 clear_bit(HCI_RUNNING, &hdev->flags);
1604                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1605 
1606                 hdev->close(hdev);
1607                 hdev->flags &= BIT(HCI_RAW);
1608         }
1609 
1610 done:
1611         hci_req_sync_unlock(hdev);
1612         return ret;
1613 }
1614 
1615 /* ---- HCI ioctl helpers ---- */
1616 
1617 int hci_dev_open(__u16 dev)
1618 {
1619         struct hci_dev *hdev;
1620         int err;
1621 
1622         hdev = hci_dev_get(dev);
1623         if (!hdev)
1624                 return -ENODEV;
1625 
1626         /* Devices that are marked as unconfigured can only be powered
1627          * up as user channel. Trying to bring them up as normal devices
1628          * will result into a failure. Only user channel operation is
1629          * possible.
1630          *
1631          * When this function is called for a user channel, the flag
1632          * HCI_USER_CHANNEL will be set first before attempting to
1633          * open the device.
1634          */
1635         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1636             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1637                 err = -EOPNOTSUPP;
1638                 goto done;
1639         }
1640 
1641         /* We need to ensure that no other power on/off work is pending
1642          * before proceeding to call hci_dev_do_open. This is
1643          * particularly important if the setup procedure has not yet
1644          * completed.
1645          */
1646         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1647                 cancel_delayed_work(&hdev->power_off);
1648 
1649         /* After this call it is guaranteed that the setup procedure
1650          * has finished. This means that error conditions like RFKILL
1651          * or no valid public or static random address apply.
1652          */
1653         flush_workqueue(hdev->req_workqueue);
1654 
1655         /* For controllers not using the management interface and that
1656          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1657          * so that pairing works for them. Once the management interface
1658          * is in use this bit will be cleared again and userspace has
1659          * to explicitly enable it.
1660          */
1661         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1662             !hci_dev_test_flag(hdev, HCI_MGMT))
1663                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1664 
1665         err = hci_dev_do_open(hdev);
1666 
1667 done:
1668         hci_dev_put(hdev);
1669         return err;
1670 }
1671 
1672 /* This function requires the caller holds hdev->lock */
1673 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1674 {
1675         struct hci_conn_params *p;
1676 
1677         list_for_each_entry(p, &hdev->le_conn_params, list) {
1678                 if (p->conn) {
1679                         hci_conn_drop(p->conn);
1680                         hci_conn_put(p->conn);
1681                         p->conn = NULL;
1682                 }
1683                 list_del_init(&p->action);
1684         }
1685 
1686         BT_DBG("All LE pending actions cleared");
1687 }
1688 
1689 int hci_dev_do_close(struct hci_dev *hdev)
1690 {
1691         bool auto_off;
1692 
1693         BT_DBG("%s %p", hdev->name, hdev);
1694 
1695         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1696             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1697             test_bit(HCI_UP, &hdev->flags)) {
1698                 /* Execute vendor specific shutdown routine */
1699                 if (hdev->shutdown)
1700                         hdev->shutdown(hdev);
1701         }
1702 
1703         cancel_delayed_work(&hdev->power_off);
1704 
1705         hci_request_cancel_all(hdev);
1706         hci_req_sync_lock(hdev);
1707 
1708         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1709                 cancel_delayed_work_sync(&hdev->cmd_timer);
1710                 hci_req_sync_unlock(hdev);
1711                 return 0;
1712         }
1713 
1714         hci_leds_update_powered(hdev, false);
1715 
1716         /* Flush RX and TX works */
1717         flush_work(&hdev->tx_work);
1718         flush_work(&hdev->rx_work);
1719 
1720         if (hdev->discov_timeout > 0) {
1721                 hdev->discov_timeout = 0;
1722                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1723                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1724         }
1725 
1726         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1727                 cancel_delayed_work(&hdev->service_cache);
1728 
1729         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1730                 struct adv_info *adv_instance;
1731 
1732                 cancel_delayed_work_sync(&hdev->rpa_expired);
1733 
1734                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1735                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1736         }
1737 
1738         /* Avoid potential lockdep warnings from the *_flush() calls by
1739          * ensuring the workqueue is empty up front.
1740          */
1741         drain_workqueue(hdev->workqueue);
1742 
1743         hci_dev_lock(hdev);
1744 
1745         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1746 
1747         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1748 
1749         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1750             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1751             hci_dev_test_flag(hdev, HCI_MGMT))
1752                 __mgmt_power_off(hdev);
1753 
1754         hci_inquiry_cache_flush(hdev);
1755         hci_pend_le_actions_clear(hdev);
1756         hci_conn_hash_flush(hdev);
1757         hci_dev_unlock(hdev);
1758 
1759         smp_unregister(hdev);
1760 
1761         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1762 
1763         if (hdev->flush)
1764                 hdev->flush(hdev);
1765 
1766         /* Reset device */
1767         skb_queue_purge(&hdev->cmd_q);
1768         atomic_set(&hdev->cmd_cnt, 1);
1769         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1770             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1771                 set_bit(HCI_INIT, &hdev->flags);
1772                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1773                 clear_bit(HCI_INIT, &hdev->flags);
1774         }
1775 
1776         /* flush cmd  work */
1777         flush_work(&hdev->cmd_work);
1778 
1779         /* Drop queues */
1780         skb_queue_purge(&hdev->rx_q);
1781         skb_queue_purge(&hdev->cmd_q);
1782         skb_queue_purge(&hdev->raw_q);
1783 
1784         /* Drop last sent command */
1785         if (hdev->sent_cmd) {
1786                 cancel_delayed_work_sync(&hdev->cmd_timer);
1787                 kfree_skb(hdev->sent_cmd);
1788                 hdev->sent_cmd = NULL;
1789         }
1790 
1791         clear_bit(HCI_RUNNING, &hdev->flags);
1792         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1793 
1794         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1795                 wake_up(&hdev->suspend_wait_q);
1796 
1797         /* After this point our queues are empty
1798          * and no tasks are scheduled. */
1799         hdev->close(hdev);
1800 
1801         /* Clear flags */
1802         hdev->flags &= BIT(HCI_RAW);
1803         hci_dev_clear_volatile_flags(hdev);
1804 
1805         /* Controller radio is available but is currently powered down */
1806         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1807 
1808         memset(hdev->eir, 0, sizeof(hdev->eir));
1809         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1810         bacpy(&hdev->random_addr, BDADDR_ANY);
1811 
1812         hci_req_sync_unlock(hdev);
1813 
1814         hci_dev_put(hdev);
1815         return 0;
1816 }
1817 
1818 int hci_dev_close(__u16 dev)
1819 {
1820         struct hci_dev *hdev;
1821         int err;
1822 
1823         hdev = hci_dev_get(dev);
1824         if (!hdev)
1825                 return -ENODEV;
1826 
1827         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1828                 err = -EBUSY;
1829                 goto done;
1830         }
1831 
1832         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1833                 cancel_delayed_work(&hdev->power_off);
1834 
1835         err = hci_dev_do_close(hdev);
1836 
1837 done:
1838         hci_dev_put(hdev);
1839         return err;
1840 }
1841 
1842 static int hci_dev_do_reset(struct hci_dev *hdev)
1843 {
1844         int ret;
1845 
1846         BT_DBG("%s %p", hdev->name, hdev);
1847 
1848         hci_req_sync_lock(hdev);
1849 
1850         /* Drop queues */
1851         skb_queue_purge(&hdev->rx_q);
1852         skb_queue_purge(&hdev->cmd_q);
1853 
1854         /* Avoid potential lockdep warnings from the *_flush() calls by
1855          * ensuring the workqueue is empty up front.
1856          */
1857         drain_workqueue(hdev->workqueue);
1858 
1859         hci_dev_lock(hdev);
1860         hci_inquiry_cache_flush(hdev);
1861         hci_conn_hash_flush(hdev);
1862         hci_dev_unlock(hdev);
1863 
1864         if (hdev->flush)
1865                 hdev->flush(hdev);
1866 
1867         atomic_set(&hdev->cmd_cnt, 1);
1868         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1869 
1870         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1871 
1872         hci_req_sync_unlock(hdev);
1873         return ret;
1874 }
1875 
1876 int hci_dev_reset(__u16 dev)
1877 {
1878         struct hci_dev *hdev;
1879         int err;
1880 
1881         hdev = hci_dev_get(dev);
1882         if (!hdev)
1883                 return -ENODEV;
1884 
1885         if (!test_bit(HCI_UP, &hdev->flags)) {
1886                 err = -ENETDOWN;
1887                 goto done;
1888         }
1889 
1890         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1891                 err = -EBUSY;
1892                 goto done;
1893         }
1894 
1895         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1896                 err = -EOPNOTSUPP;
1897                 goto done;
1898         }
1899 
1900         err = hci_dev_do_reset(hdev);
1901 
1902 done:
1903         hci_dev_put(hdev);
1904         return err;
1905 }
1906 
1907 int hci_dev_reset_stat(__u16 dev)
1908 {
1909         struct hci_dev *hdev;
1910         int ret = 0;
1911 
1912         hdev = hci_dev_get(dev);
1913         if (!hdev)
1914                 return -ENODEV;
1915 
1916         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1917                 ret = -EBUSY;
1918                 goto done;
1919         }
1920 
1921         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1922                 ret = -EOPNOTSUPP;
1923                 goto done;
1924         }
1925 
1926         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1927 
1928 done:
1929         hci_dev_put(hdev);
1930         return ret;
1931 }
1932 
1933 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1934 {
1935         bool conn_changed, discov_changed;
1936 
1937         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1938 
1939         if ((scan & SCAN_PAGE))
1940                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1941                                                           HCI_CONNECTABLE);
1942         else
1943                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1944                                                            HCI_CONNECTABLE);
1945 
1946         if ((scan & SCAN_INQUIRY)) {
1947                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1948                                                             HCI_DISCOVERABLE);
1949         } else {
1950                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1951                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1952                                                              HCI_DISCOVERABLE);
1953         }
1954 
1955         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1956                 return;
1957 
1958         if (conn_changed || discov_changed) {
1959                 /* In case this was disabled through mgmt */
1960                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1961 
1962                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1963                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1964 
1965                 mgmt_new_settings(hdev);
1966         }
1967 }
1968 
1969 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1970 {
1971         struct hci_dev *hdev;
1972         struct hci_dev_req dr;
1973         int err = 0;
1974 
1975         if (copy_from_user(&dr, arg, sizeof(dr)))
1976                 return -EFAULT;
1977 
1978         hdev = hci_dev_get(dr.dev_id);
1979         if (!hdev)
1980                 return -ENODEV;
1981 
1982         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1983                 err = -EBUSY;
1984                 goto done;
1985         }
1986 
1987         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1988                 err = -EOPNOTSUPP;
1989                 goto done;
1990         }
1991 
1992         if (hdev->dev_type != HCI_PRIMARY) {
1993                 err = -EOPNOTSUPP;
1994                 goto done;
1995         }
1996 
1997         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1998                 err = -EOPNOTSUPP;
1999                 goto done;
2000         }
2001 
2002         switch (cmd) {
2003         case HCISETAUTH:
2004                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2005                                    HCI_INIT_TIMEOUT, NULL);
2006                 break;
2007 
2008         case HCISETENCRYPT:
2009                 if (!lmp_encrypt_capable(hdev)) {
2010                         err = -EOPNOTSUPP;
2011                         break;
2012                 }
2013 
2014                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2015                         /* Auth must be enabled first */
2016                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2017                                            HCI_INIT_TIMEOUT, NULL);
2018                         if (err)
2019                                 break;
2020                 }
2021 
2022                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2023                                    HCI_INIT_TIMEOUT, NULL);
2024                 break;
2025 
2026         case HCISETSCAN:
2027                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2028                                    HCI_INIT_TIMEOUT, NULL);
2029 
2030                 /* Ensure that the connectable and discoverable states
2031                  * get correctly modified as this was a non-mgmt change.
2032                  */
2033                 if (!err)
2034                         hci_update_scan_state(hdev, dr.dev_opt);
2035                 break;
2036 
2037         case HCISETLINKPOL:
2038                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2039                                    HCI_INIT_TIMEOUT, NULL);
2040                 break;
2041 
2042         case HCISETLINKMODE:
2043                 hdev->link_mode = ((__u16) dr.dev_opt) &
2044                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2045                 break;
2046 
2047         case HCISETPTYPE:
2048                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2049                         break;
2050 
2051                 hdev->pkt_type = (__u16) dr.dev_opt;
2052                 mgmt_phy_configuration_changed(hdev, NULL);
2053                 break;
2054 
2055         case HCISETACLMTU:
2056                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2057                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2058                 break;
2059 
2060         case HCISETSCOMTU:
2061                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2062                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2063                 break;
2064 
2065         default:
2066                 err = -EINVAL;
2067                 break;
2068         }
2069 
2070 done:
2071         hci_dev_put(hdev);
2072         return err;
2073 }
2074 
2075 int hci_get_dev_list(void __user *arg)
2076 {
2077         struct hci_dev *hdev;
2078         struct hci_dev_list_req *dl;
2079         struct hci_dev_req *dr;
2080         int n = 0, size, err;
2081         __u16 dev_num;
2082 
2083         if (get_user(dev_num, (__u16 __user *) arg))
2084                 return -EFAULT;
2085 
2086         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2087                 return -EINVAL;
2088 
2089         size = sizeof(*dl) + dev_num * sizeof(*dr);
2090 
2091         dl = kzalloc(size, GFP_KERNEL);
2092         if (!dl)
2093                 return -ENOMEM;
2094 
2095         dr = dl->dev_req;
2096 
2097         read_lock(&hci_dev_list_lock);
2098         list_for_each_entry(hdev, &hci_dev_list, list) {
2099                 unsigned long flags = hdev->flags;
2100 
2101                 /* When the auto-off is configured it means the transport
2102                  * is running, but in that case still indicate that the
2103                  * device is actually down.
2104                  */
2105                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2106                         flags &= ~BIT(HCI_UP);
2107 
2108                 (dr + n)->dev_id  = hdev->id;
2109                 (dr + n)->dev_opt = flags;
2110 
2111                 if (++n >= dev_num)
2112                         break;
2113         }
2114         read_unlock(&hci_dev_list_lock);
2115 
2116         dl->dev_num = n;
2117         size = sizeof(*dl) + n * sizeof(*dr);
2118 
2119         err = copy_to_user(arg, dl, size);
2120         kfree(dl);
2121 
2122         return err ? -EFAULT : 0;
2123 }
2124 
2125 int hci_get_dev_info(void __user *arg)
2126 {
2127         struct hci_dev *hdev;
2128         struct hci_dev_info di;
2129         unsigned long flags;
2130         int err = 0;
2131 
2132         if (copy_from_user(&di, arg, sizeof(di)))
2133                 return -EFAULT;
2134 
2135         hdev = hci_dev_get(di.dev_id);
2136         if (!hdev)
2137                 return -ENODEV;
2138 
2139         /* When the auto-off is configured it means the transport
2140          * is running, but in that case still indicate that the
2141          * device is actually down.
2142          */
2143         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2144                 flags = hdev->flags & ~BIT(HCI_UP);
2145         else
2146                 flags = hdev->flags;
2147 
2148         strcpy(di.name, hdev->name);
2149         di.bdaddr   = hdev->bdaddr;
2150         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2151         di.flags    = flags;
2152         di.pkt_type = hdev->pkt_type;
2153         if (lmp_bredr_capable(hdev)) {
2154                 di.acl_mtu  = hdev->acl_mtu;
2155                 di.acl_pkts = hdev->acl_pkts;
2156                 di.sco_mtu  = hdev->sco_mtu;
2157                 di.sco_pkts = hdev->sco_pkts;
2158         } else {
2159                 di.acl_mtu  = hdev->le_mtu;
2160                 di.acl_pkts = hdev->le_pkts;
2161                 di.sco_mtu  = 0;
2162                 di.sco_pkts = 0;
2163         }
2164         di.link_policy = hdev->link_policy;
2165         di.link_mode   = hdev->link_mode;
2166 
2167         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2168         memcpy(&di.features, &hdev->features, sizeof(di.features));
2169 
2170         if (copy_to_user(arg, &di, sizeof(di)))
2171                 err = -EFAULT;
2172 
2173         hci_dev_put(hdev);
2174 
2175         return err;
2176 }
2177 
2178 /* ---- Interface to HCI drivers ---- */
2179 
2180 static int hci_rfkill_set_block(void *data, bool blocked)
2181 {
2182         struct hci_dev *hdev = data;
2183 
2184         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2185 
2186         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2187                 return -EBUSY;
2188 
2189         if (blocked) {
2190                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2191                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2192                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2193                         hci_dev_do_close(hdev);
2194         } else {
2195                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2196         }
2197 
2198         return 0;
2199 }
2200 
2201 static const struct rfkill_ops hci_rfkill_ops = {
2202         .set_block = hci_rfkill_set_block,
2203 };
2204 
2205 static void hci_power_on(struct work_struct *work)
2206 {
2207         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2208         int err;
2209 
2210         BT_DBG("%s", hdev->name);
2211 
2212         if (test_bit(HCI_UP, &hdev->flags) &&
2213             hci_dev_test_flag(hdev, HCI_MGMT) &&
2214             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2215                 cancel_delayed_work(&hdev->power_off);
2216                 hci_req_sync_lock(hdev);
2217                 err = __hci_req_hci_power_on(hdev);
2218                 hci_req_sync_unlock(hdev);
2219                 mgmt_power_on(hdev, err);
2220                 return;
2221         }
2222 
2223         err = hci_dev_do_open(hdev);
2224         if (err < 0) {
2225                 hci_dev_lock(hdev);
2226                 mgmt_set_powered_failed(hdev, err);
2227                 hci_dev_unlock(hdev);
2228                 return;
2229         }
2230 
2231         /* During the HCI setup phase, a few error conditions are
2232          * ignored and they need to be checked now. If they are still
2233          * valid, it is important to turn the device back off.
2234          */
2235         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2236             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2237             (hdev->dev_type == HCI_PRIMARY &&
2238              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2239              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2240                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2241                 hci_dev_do_close(hdev);
2242         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2243                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2244                                    HCI_AUTO_OFF_TIMEOUT);
2245         }
2246 
2247         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2248                 /* For unconfigured devices, set the HCI_RAW flag
2249                  * so that userspace can easily identify them.
2250                  */
2251                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2252                         set_bit(HCI_RAW, &hdev->flags);
2253 
2254                 /* For fully configured devices, this will send
2255                  * the Index Added event. For unconfigured devices,
2256                  * it will send Unconfigued Index Added event.
2257                  *
2258                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2259                  * and no event will be send.
2260                  */
2261                 mgmt_index_added(hdev);
2262         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2263                 /* When the controller is now configured, then it
2264                  * is important to clear the HCI_RAW flag.
2265                  */
2266                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2267                         clear_bit(HCI_RAW, &hdev->flags);
2268 
2269                 /* Powering on the controller with HCI_CONFIG set only
2270                  * happens with the transition from unconfigured to
2271                  * configured. This will send the Index Added event.
2272                  */
2273                 mgmt_index_added(hdev);
2274         }
2275 }
2276 
2277 static void hci_power_off(struct work_struct *work)
2278 {
2279         struct hci_dev *hdev = container_of(work, struct hci_dev,
2280                                             power_off.work);
2281 
2282         BT_DBG("%s", hdev->name);
2283 
2284         hci_dev_do_close(hdev);
2285 }
2286 
2287 static void hci_error_reset(struct work_struct *work)
2288 {
2289         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2290 
2291         BT_DBG("%s", hdev->name);
2292 
2293         if (hdev->hw_error)
2294                 hdev->hw_error(hdev, hdev->hw_error_code);
2295         else
2296                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2297 
2298         if (hci_dev_do_close(hdev))
2299                 return;
2300 
2301         hci_dev_do_open(hdev);
2302 }
2303 
2304 void hci_uuids_clear(struct hci_dev *hdev)
2305 {
2306         struct bt_uuid *uuid, *tmp;
2307 
2308         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2309                 list_del(&uuid->list);
2310                 kfree(uuid);
2311         }
2312 }
2313 
2314 void hci_link_keys_clear(struct hci_dev *hdev)
2315 {
2316         struct link_key *key;
2317 
2318         list_for_each_entry(key, &hdev->link_keys, list) {
2319                 list_del_rcu(&key->list);
2320                 kfree_rcu(key, rcu);
2321         }
2322 }
2323 
2324 void hci_smp_ltks_clear(struct hci_dev *hdev)
2325 {
2326         struct smp_ltk *k;
2327 
2328         list_for_each_entry(k, &hdev->long_term_keys, list) {
2329                 list_del_rcu(&k->list);
2330                 kfree_rcu(k, rcu);
2331         }
2332 }
2333 
2334 void hci_smp_irks_clear(struct hci_dev *hdev)
2335 {
2336         struct smp_irk *k;
2337 
2338         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2339                 list_del_rcu(&k->list);
2340                 kfree_rcu(k, rcu);
2341         }
2342 }
2343 
2344 void hci_blocked_keys_clear(struct hci_dev *hdev)
2345 {
2346         struct blocked_key *b;
2347 
2348         list_for_each_entry(b, &hdev->blocked_keys, list) {
2349                 list_del_rcu(&b->list);
2350                 kfree_rcu(b, rcu);
2351         }
2352 }
2353 
2354 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2355 {
2356         bool blocked = false;
2357         struct blocked_key *b;
2358 
2359         rcu_read_lock();
2360         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2361                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2362                         blocked = true;
2363                         break;
2364                 }
2365         }
2366 
2367         rcu_read_unlock();
2368         return blocked;
2369 }
2370 
2371 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2372 {
2373         struct link_key *k;
2374 
2375         rcu_read_lock();
2376         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2377                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2378                         rcu_read_unlock();
2379 
2380                         if (hci_is_blocked_key(hdev,
2381                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2382                                                k->val)) {
2383                                 bt_dev_warn_ratelimited(hdev,
2384                                                         "Link key blocked for %pMR",
2385                                                         &k->bdaddr);
2386                                 return NULL;
2387                         }
2388 
2389                         return k;
2390                 }
2391         }
2392         rcu_read_unlock();
2393 
2394         return NULL;
2395 }
2396 
2397 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2398                                u8 key_type, u8 old_key_type)
2399 {
2400         /* Legacy key */
2401         if (key_type < 0x03)
2402                 return true;
2403 
2404         /* Debug keys are insecure so don't store them persistently */
2405         if (key_type == HCI_LK_DEBUG_COMBINATION)
2406                 return false;
2407 
2408         /* Changed combination key and there's no previous one */
2409         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2410                 return false;
2411 
2412         /* Security mode 3 case */
2413         if (!conn)
2414                 return true;
2415 
2416         /* BR/EDR key derived using SC from an LE link */
2417         if (conn->type == LE_LINK)
2418                 return true;
2419 
2420         /* Neither local nor remote side had no-bonding as requirement */
2421         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2422                 return true;
2423 
2424         /* Local side had dedicated bonding as requirement */
2425         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2426                 return true;
2427 
2428         /* Remote side had dedicated bonding as requirement */
2429         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2430                 return true;
2431 
2432         /* If none of the above criteria match, then don't store the key
2433          * persistently */
2434         return false;
2435 }
2436 
2437 static u8 ltk_role(u8 type)
2438 {
2439         if (type == SMP_LTK)
2440                 return HCI_ROLE_MASTER;
2441 
2442         return HCI_ROLE_SLAVE;
2443 }
2444 
2445 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2446                              u8 addr_type, u8 role)
2447 {
2448         struct smp_ltk *k;
2449 
2450         rcu_read_lock();
2451         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2452                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2453                         continue;
2454 
2455                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2456                         rcu_read_unlock();
2457 
2458                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2459                                                k->val)) {
2460                                 bt_dev_warn_ratelimited(hdev,
2461                                                         "LTK blocked for %pMR",
2462                                                         &k->bdaddr);
2463                                 return NULL;
2464                         }
2465 
2466                         return k;
2467                 }
2468         }
2469         rcu_read_unlock();
2470 
2471         return NULL;
2472 }
2473 
2474 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2475 {
2476         struct smp_irk *irk_to_return = NULL;
2477         struct smp_irk *irk;
2478 
2479         rcu_read_lock();
2480         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2481                 if (!bacmp(&irk->rpa, rpa)) {
2482                         irk_to_return = irk;
2483                         goto done;
2484                 }
2485         }
2486 
2487         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2488                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2489                         bacpy(&irk->rpa, rpa);
2490                         irk_to_return = irk;
2491                         goto done;
2492                 }
2493         }
2494 
2495 done:
2496         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2497                                                 irk_to_return->val)) {
2498                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2499                                         &irk_to_return->bdaddr);
2500                 irk_to_return = NULL;
2501         }
2502 
2503         rcu_read_unlock();
2504 
2505         return irk_to_return;
2506 }
2507 
2508 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2509                                      u8 addr_type)
2510 {
2511         struct smp_irk *irk_to_return = NULL;
2512         struct smp_irk *irk;
2513 
2514         /* Identity Address must be public or static random */
2515         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2516                 return NULL;
2517 
2518         rcu_read_lock();
2519         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2520                 if (addr_type == irk->addr_type &&
2521                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2522                         irk_to_return = irk;
2523                         goto done;
2524                 }
2525         }
2526 
2527 done:
2528 
2529         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2530                                                 irk_to_return->val)) {
2531                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2532                                         &irk_to_return->bdaddr);
2533                 irk_to_return = NULL;
2534         }
2535 
2536         rcu_read_unlock();
2537 
2538         return irk_to_return;
2539 }
2540 
2541 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2542                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2543                                   u8 pin_len, bool *persistent)
2544 {
2545         struct link_key *key, *old_key;
2546         u8 old_key_type;
2547 
2548         old_key = hci_find_link_key(hdev, bdaddr);
2549         if (old_key) {
2550                 old_key_type = old_key->type;
2551                 key = old_key;
2552         } else {
2553                 old_key_type = conn ? conn->key_type : 0xff;
2554                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2555                 if (!key)
2556                         return NULL;
2557                 list_add_rcu(&key->list, &hdev->link_keys);
2558         }
2559 
2560         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2561 
2562         /* Some buggy controller combinations generate a changed
2563          * combination key for legacy pairing even when there's no
2564          * previous key */
2565         if (type == HCI_LK_CHANGED_COMBINATION &&
2566             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2567                 type = HCI_LK_COMBINATION;
2568                 if (conn)
2569                         conn->key_type = type;
2570         }
2571 
2572         bacpy(&key->bdaddr, bdaddr);
2573         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2574         key->pin_len = pin_len;
2575 
2576         if (type == HCI_LK_CHANGED_COMBINATION)
2577                 key->type = old_key_type;
2578         else
2579                 key->type = type;
2580 
2581         if (persistent)
2582                 *persistent = hci_persistent_key(hdev, conn, type,
2583                                                  old_key_type);
2584 
2585         return key;
2586 }
2587 
2588 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2589                             u8 addr_type, u8 type, u8 authenticated,
2590                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2591 {
2592         struct smp_ltk *key, *old_key;
2593         u8 role = ltk_role(type);
2594 
2595         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2596         if (old_key)
2597                 key = old_key;
2598         else {
2599                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2600                 if (!key)
2601                         return NULL;
2602                 list_add_rcu(&key->list, &hdev->long_term_keys);
2603         }
2604 
2605         bacpy(&key->bdaddr, bdaddr);
2606         key->bdaddr_type = addr_type;
2607         memcpy(key->val, tk, sizeof(key->val));
2608         key->authenticated = authenticated;
2609         key->ediv = ediv;
2610         key->rand = rand;
2611         key->enc_size = enc_size;
2612         key->type = type;
2613 
2614         return key;
2615 }
2616 
2617 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2618                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2619 {
2620         struct smp_irk *irk;
2621 
2622         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2623         if (!irk) {
2624                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2625                 if (!irk)
2626                         return NULL;
2627 
2628                 bacpy(&irk->bdaddr, bdaddr);
2629                 irk->addr_type = addr_type;
2630 
2631                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2632         }
2633 
2634         memcpy(irk->val, val, 16);
2635         bacpy(&irk->rpa, rpa);
2636 
2637         return irk;
2638 }
2639 
2640 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2641 {
2642         struct link_key *key;
2643 
2644         key = hci_find_link_key(hdev, bdaddr);
2645         if (!key)
2646                 return -ENOENT;
2647 
2648         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2649 
2650         list_del_rcu(&key->list);
2651         kfree_rcu(key, rcu);
2652 
2653         return 0;
2654 }
2655 
2656 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2657 {
2658         struct smp_ltk *k;
2659         int removed = 0;
2660 
2661         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2662                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2663                         continue;
2664 
2665                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2666 
2667                 list_del_rcu(&k->list);
2668                 kfree_rcu(k, rcu);
2669                 removed++;
2670         }
2671 
2672         return removed ? 0 : -ENOENT;
2673 }
2674 
2675 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2676 {
2677         struct smp_irk *k;
2678 
2679         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2680                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2681                         continue;
2682 
2683                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2684 
2685                 list_del_rcu(&k->list);
2686                 kfree_rcu(k, rcu);
2687         }
2688 }
2689 
2690 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2691 {
2692         struct smp_ltk *k;
2693         struct smp_irk *irk;
2694         u8 addr_type;
2695 
2696         if (type == BDADDR_BREDR) {
2697                 if (hci_find_link_key(hdev, bdaddr))
2698                         return true;
2699                 return false;
2700         }
2701 
2702         /* Convert to HCI addr type which struct smp_ltk uses */
2703         if (type == BDADDR_LE_PUBLIC)
2704                 addr_type = ADDR_LE_DEV_PUBLIC;
2705         else
2706                 addr_type = ADDR_LE_DEV_RANDOM;
2707 
2708         irk = hci_get_irk(hdev, bdaddr, addr_type);
2709         if (irk) {
2710                 bdaddr = &irk->bdaddr;
2711                 addr_type = irk->addr_type;
2712         }
2713 
2714         rcu_read_lock();
2715         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2716                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2717                         rcu_read_unlock();
2718                         return true;
2719                 }
2720         }
2721         rcu_read_unlock();
2722 
2723         return false;
2724 }
2725 
2726 /* HCI command timer function */
2727 static void hci_cmd_timeout(struct work_struct *work)
2728 {
2729         struct hci_dev *hdev = container_of(work, struct hci_dev,
2730                                             cmd_timer.work);
2731 
2732         if (hdev->sent_cmd) {
2733                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2734                 u16 opcode = __le16_to_cpu(sent->opcode);
2735 
2736                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2737         } else {
2738                 bt_dev_err(hdev, "command tx timeout");
2739         }
2740 
2741         if (hdev->cmd_timeout)
2742                 hdev->cmd_timeout(hdev);
2743 
2744         atomic_set(&hdev->cmd_cnt, 1);
2745         queue_work(hdev->workqueue, &hdev->cmd_work);
2746 }
2747 
2748 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2749                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2750 {
2751         struct oob_data *data;
2752 
2753         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2754                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2755                         continue;
2756                 if (data->bdaddr_type != bdaddr_type)
2757                         continue;
2758                 return data;
2759         }
2760 
2761         return NULL;
2762 }
2763 
2764 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2765                                u8 bdaddr_type)
2766 {
2767         struct oob_data *data;
2768 
2769         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2770         if (!data)
2771                 return -ENOENT;
2772 
2773         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2774 
2775         list_del(&data->list);
2776         kfree(data);
2777 
2778         return 0;
2779 }
2780 
2781 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2782 {
2783         struct oob_data *data, *n;
2784 
2785         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2786                 list_del(&data->list);
2787                 kfree(data);
2788         }
2789 }
2790 
2791 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2792                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2793                             u8 *hash256, u8 *rand256)
2794 {
2795         struct oob_data *data;
2796 
2797         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2798         if (!data) {
2799                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2800                 if (!data)
2801                         return -ENOMEM;
2802 
2803                 bacpy(&data->bdaddr, bdaddr);
2804                 data->bdaddr_type = bdaddr_type;
2805                 list_add(&data->list, &hdev->remote_oob_data);
2806         }
2807 
2808         if (hash192 && rand192) {
2809                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2810                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2811                 if (hash256 && rand256)
2812                         data->present = 0x03;
2813         } else {
2814                 memset(data->hash192, 0, sizeof(data->hash192));
2815                 memset(data->rand192, 0, sizeof(data->rand192));
2816                 if (hash256 && rand256)
2817                         data->present = 0x02;
2818                 else
2819                         data->present = 0x00;
2820         }
2821 
2822         if (hash256 && rand256) {
2823                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2824                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2825         } else {
2826                 memset(data->hash256, 0, sizeof(data->hash256));
2827                 memset(data->rand256, 0, sizeof(data->rand256));
2828                 if (hash192 && rand192)
2829                         data->present = 0x01;
2830         }
2831 
2832         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2833 
2834         return 0;
2835 }
2836 
2837 /* This function requires the caller holds hdev->lock */
2838 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2839 {
2840         struct adv_info *adv_instance;
2841 
2842         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2843                 if (adv_instance->instance == instance)
2844                         return adv_instance;
2845         }
2846 
2847         return NULL;
2848 }
2849 
2850 /* This function requires the caller holds hdev->lock */
2851 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2852 {
2853         struct adv_info *cur_instance;
2854 
2855         cur_instance = hci_find_adv_instance(hdev, instance);
2856         if (!cur_instance)
2857                 return NULL;
2858 
2859         if (cur_instance == list_last_entry(&hdev->adv_instances,
2860                                             struct adv_info, list))
2861                 return list_first_entry(&hdev->adv_instances,
2862                                                  struct adv_info, list);
2863         else
2864                 return list_next_entry(cur_instance, list);
2865 }
2866 
2867 /* This function requires the caller holds hdev->lock */
2868 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2869 {
2870         struct adv_info *adv_instance;
2871 
2872         adv_instance = hci_find_adv_instance(hdev, instance);
2873         if (!adv_instance)
2874                 return -ENOENT;
2875 
2876         BT_DBG("%s removing %dMR", hdev->name, instance);
2877 
2878         if (hdev->cur_adv_instance == instance) {
2879                 if (hdev->adv_instance_timeout) {
2880                         cancel_delayed_work(&hdev->adv_instance_expire);
2881                         hdev->adv_instance_timeout = 0;
2882                 }
2883                 hdev->cur_adv_instance = 0x00;
2884         }
2885 
2886         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2887 
2888         list_del(&adv_instance->list);
2889         kfree(adv_instance);
2890 
2891         hdev->adv_instance_cnt--;
2892 
2893         return 0;
2894 }
2895 
2896 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2897 {
2898         struct adv_info *adv_instance, *n;
2899 
2900         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2901                 adv_instance->rpa_expired = rpa_expired;
2902 }
2903 
2904 /* This function requires the caller holds hdev->lock */
2905 void hci_adv_instances_clear(struct hci_dev *hdev)
2906 {
2907         struct adv_info *adv_instance, *n;
2908 
2909         if (hdev->adv_instance_timeout) {
2910                 cancel_delayed_work(&hdev->adv_instance_expire);
2911                 hdev->adv_instance_timeout = 0;
2912         }
2913 
2914         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2915                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2916                 list_del(&adv_instance->list);
2917                 kfree(adv_instance);
2918         }
2919 
2920         hdev->adv_instance_cnt = 0;
2921         hdev->cur_adv_instance = 0x00;
2922 }
2923 
2924 static void adv_instance_rpa_expired(struct work_struct *work)
2925 {
2926         struct adv_info *adv_instance = container_of(work, struct adv_info,
2927                                                      rpa_expired_cb.work);
2928 
2929         BT_DBG("");
2930 
2931         adv_instance->rpa_expired = true;
2932 }
2933 
2934 /* This function requires the caller holds hdev->lock */
2935 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2936                          u16 adv_data_len, u8 *adv_data,
2937                          u16 scan_rsp_len, u8 *scan_rsp_data,
2938                          u16 timeout, u16 duration)
2939 {
2940         struct adv_info *adv_instance;
2941 
2942         adv_instance = hci_find_adv_instance(hdev, instance);
2943         if (adv_instance) {
2944                 memset(adv_instance->adv_data, 0,
2945                        sizeof(adv_instance->adv_data));
2946                 memset(adv_instance->scan_rsp_data, 0,
2947                        sizeof(adv_instance->scan_rsp_data));
2948         } else {
2949                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2950                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2951                         return -EOVERFLOW;
2952 
2953                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2954                 if (!adv_instance)
2955                         return -ENOMEM;
2956 
2957                 adv_instance->pending = true;
2958                 adv_instance->instance = instance;
2959                 list_add(&adv_instance->list, &hdev->adv_instances);
2960                 hdev->adv_instance_cnt++;
2961         }
2962 
2963         adv_instance->flags = flags;
2964         adv_instance->adv_data_len = adv_data_len;
2965         adv_instance->scan_rsp_len = scan_rsp_len;
2966 
2967         if (adv_data_len)
2968                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2969 
2970         if (scan_rsp_len)
2971                 memcpy(adv_instance->scan_rsp_data,
2972                        scan_rsp_data, scan_rsp_len);
2973 
2974         adv_instance->timeout = timeout;
2975         adv_instance->remaining_time = timeout;
2976 
2977         if (duration == 0)
2978                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2979         else
2980                 adv_instance->duration = duration;
2981 
2982         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2983 
2984         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2985                           adv_instance_rpa_expired);
2986 
2987         BT_DBG("%s for %dMR", hdev->name, instance);
2988 
2989         return 0;
2990 }
2991 
2992 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2993                                          bdaddr_t *bdaddr, u8 type)
2994 {
2995         struct bdaddr_list *b;
2996 
2997         list_for_each_entry(b, bdaddr_list, list) {
2998                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2999                         return b;
3000         }
3001 
3002         return NULL;
3003 }
3004 
3005 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3006                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3007                                 u8 type)
3008 {
3009         struct bdaddr_list_with_irk *b;
3010 
3011         list_for_each_entry(b, bdaddr_list, list) {
3012                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3013                         return b;
3014         }
3015 
3016         return NULL;
3017 }
3018 
3019 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3020 {
3021         struct bdaddr_list *b, *n;
3022 
3023         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3024                 list_del(&b->list);
3025                 kfree(b);
3026         }
3027 }
3028 
3029 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3030 {
3031         struct bdaddr_list *entry;
3032 
3033         if (!bacmp(bdaddr, BDADDR_ANY))
3034                 return -EBADF;
3035 
3036         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3037                 return -EEXIST;
3038 
3039         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3040         if (!entry)
3041                 return -ENOMEM;
3042 
3043         bacpy(&entry->bdaddr, bdaddr);
3044         entry->bdaddr_type = type;
3045 
3046         list_add(&entry->list, list);
3047 
3048         return 0;
3049 }
3050 
3051 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3052                                         u8 type, u8 *peer_irk, u8 *local_irk)
3053 {
3054         struct bdaddr_list_with_irk *entry;
3055 
3056         if (!bacmp(bdaddr, BDADDR_ANY))
3057                 return -EBADF;
3058 
3059         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3060                 return -EEXIST;
3061 
3062         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3063         if (!entry)
3064                 return -ENOMEM;
3065 
3066         bacpy(&entry->bdaddr, bdaddr);
3067         entry->bdaddr_type = type;
3068 
3069         if (peer_irk)
3070                 memcpy(entry->peer_irk, peer_irk, 16);
3071 
3072         if (local_irk)
3073                 memcpy(entry->local_irk, local_irk, 16);
3074 
3075         list_add(&entry->list, list);
3076 
3077         return 0;
3078 }
3079 
3080 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3081 {
3082         struct bdaddr_list *entry;
3083 
3084         if (!bacmp(bdaddr, BDADDR_ANY)) {
3085                 hci_bdaddr_list_clear(list);
3086                 return 0;
3087         }
3088 
3089         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3090         if (!entry)
3091                 return -ENOENT;
3092 
3093         list_del(&entry->list);
3094         kfree(entry);
3095 
3096         return 0;
3097 }
3098 
3099 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3100                                                         u8 type)
3101 {
3102         struct bdaddr_list_with_irk *entry;
3103 
3104         if (!bacmp(bdaddr, BDADDR_ANY)) {
3105                 hci_bdaddr_list_clear(list);
3106                 return 0;
3107         }
3108 
3109         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3110         if (!entry)
3111                 return -ENOENT;
3112 
3113         list_del(&entry->list);
3114         kfree(entry);
3115 
3116         return 0;
3117 }
3118 
3119 /* This function requires the caller holds hdev->lock */
3120 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3121                                                bdaddr_t *addr, u8 addr_type)
3122 {
3123         struct hci_conn_params *params;
3124 
3125         list_for_each_entry(params, &hdev->le_conn_params, list) {
3126                 if (bacmp(&params->addr, addr) == 0 &&
3127                     params->addr_type == addr_type) {
3128                         return params;
3129                 }
3130         }
3131 
3132         return NULL;
3133 }
3134 
3135 /* This function requires the caller holds hdev->lock */
3136 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3137                                                   bdaddr_t *addr, u8 addr_type)
3138 {
3139         struct hci_conn_params *param;
3140 
3141         list_for_each_entry(param, list, action) {
3142                 if (bacmp(&param->addr, addr) == 0 &&
3143                     param->addr_type == addr_type)
3144                         return param;
3145         }
3146 
3147         return NULL;
3148 }
3149 
3150 /* This function requires the caller holds hdev->lock */
3151 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3152                                             bdaddr_t *addr, u8 addr_type)
3153 {
3154         struct hci_conn_params *params;
3155 
3156         params = hci_conn_params_lookup(hdev, addr, addr_type);
3157         if (params)
3158                 return params;
3159 
3160         params = kzalloc(sizeof(*params), GFP_KERNEL);
3161         if (!params) {
3162                 bt_dev_err(hdev, "out of memory");
3163                 return NULL;
3164         }
3165 
3166         bacpy(&params->addr, addr);
3167         params->addr_type = addr_type;
3168 
3169         list_add(&params->list, &hdev->le_conn_params);
3170         INIT_LIST_HEAD(&params->action);
3171 
3172         params->conn_min_interval = hdev->le_conn_min_interval;
3173         params->conn_max_interval = hdev->le_conn_max_interval;
3174         params->conn_latency = hdev->le_conn_latency;
3175         params->supervision_timeout = hdev->le_supv_timeout;
3176         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3177 
3178         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3179 
3180         return params;
3181 }
3182 
3183 static void hci_conn_params_free(struct hci_conn_params *params)
3184 {
3185         if (params->conn) {
3186                 hci_conn_drop(params->conn);
3187                 hci_conn_put(params->conn);
3188         }
3189 
3190         list_del(&params->action);
3191         list_del(&params->list);
3192         kfree(params);
3193 }
3194 
3195 /* This function requires the caller holds hdev->lock */
3196 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3197 {
3198         struct hci_conn_params *params;
3199 
3200         params = hci_conn_params_lookup(hdev, addr, addr_type);
3201         if (!params)
3202                 return;
3203 
3204         hci_conn_params_free(params);
3205 
3206         hci_update_background_scan(hdev);
3207 
3208         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3209 }
3210 
3211 /* This function requires the caller holds hdev->lock */
3212 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3213 {
3214         struct hci_conn_params *params, *tmp;
3215 
3216         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3217                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3218                         continue;
3219 
3220                 /* If trying to estabilish one time connection to disabled
3221                  * device, leave the params, but mark them as just once.
3222                  */
3223                 if (params->explicit_connect) {
3224                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3225                         continue;
3226                 }
3227 
3228                 list_del(&params->list);
3229                 kfree(params);
3230         }
3231 
3232         BT_DBG("All LE disabled connection parameters were removed");
3233 }
3234 
3235 /* This function requires the caller holds hdev->lock */
3236 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3237 {
3238         struct hci_conn_params *params, *tmp;
3239 
3240         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3241                 hci_conn_params_free(params);
3242 
3243         BT_DBG("All LE connection parameters were removed");
3244 }
3245 
3246 /* Copy the Identity Address of the controller.
3247  *
3248  * If the controller has a public BD_ADDR, then by default use that one.
3249  * If this is a LE only controller without a public address, default to
3250  * the static random address.
3251  *
3252  * For debugging purposes it is possible to force controllers with a
3253  * public address to use the static random address instead.
3254  *
3255  * In case BR/EDR has been disabled on a dual-mode controller and
3256  * userspace has configured a static address, then that address
3257  * becomes the identity address instead of the public BR/EDR address.
3258  */
3259 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3260                                u8 *bdaddr_type)
3261 {
3262         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3263             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3264             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3265              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3266                 bacpy(bdaddr, &hdev->static_addr);
3267                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3268         } else {
3269                 bacpy(bdaddr, &hdev->bdaddr);
3270                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3271         }
3272 }
3273 
3274 static int hci_suspend_wait_event(struct hci_dev *hdev)
3275 {
3276 #define WAKE_COND                                                              \
3277         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3278          __SUSPEND_NUM_TASKS)
3279 
3280         int i;
3281         int ret = wait_event_timeout(hdev->suspend_wait_q,
3282                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3283 
3284         if (ret == 0) {
3285                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3286                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3287                         if (test_bit(i, hdev->suspend_tasks))
3288                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3289                         clear_bit(i, hdev->suspend_tasks);
3290                 }
3291 
3292                 ret = -ETIMEDOUT;
3293         } else {
3294                 ret = 0;
3295         }
3296 
3297         return ret;
3298 }
3299 
3300 static void hci_prepare_suspend(struct work_struct *work)
3301 {
3302         struct hci_dev *hdev =
3303                 container_of(work, struct hci_dev, suspend_prepare);
3304 
3305         hci_dev_lock(hdev);
3306         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3307         hci_dev_unlock(hdev);
3308 }
3309 
3310 static int hci_change_suspend_state(struct hci_dev *hdev,
3311                                     enum suspended_state next)
3312 {
3313         hdev->suspend_state_next = next;
3314         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3315         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3316         return hci_suspend_wait_event(hdev);
3317 }
3318 
3319 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3320                                 void *data)
3321 {
3322         struct hci_dev *hdev =
3323                 container_of(nb, struct hci_dev, suspend_notifier);
3324         int ret = 0;
3325 
3326         /* If powering down, wait for completion. */
3327         if (mgmt_powering_down(hdev)) {
3328                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3329                 ret = hci_suspend_wait_event(hdev);
3330                 if (ret)
3331                         goto done;
3332         }
3333 
3334         /* Suspend notifier should only act on events when powered. */
3335         if (!hdev_is_powered(hdev))
3336                 goto done;
3337 
3338         if (action == PM_SUSPEND_PREPARE) {
3339                 /* Suspend consists of two actions:
3340                  *  - First, disconnect everything and make the controller not
3341                  *    connectable (disabling scanning)
3342                  *  - Second, program event filter/whitelist and enable scan
3343                  */
3344                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3345 
3346                 /* Only configure whitelist if disconnect succeeded */
3347                 if (!ret)
3348                         ret = hci_change_suspend_state(hdev,
3349                                                        BT_SUSPEND_COMPLETE);
3350         } else if (action == PM_POST_SUSPEND) {
3351                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3352         }
3353 
3354 done:
3355         /* We always allow suspend even if suspend preparation failed and
3356          * attempt to recover in resume.
3357          */
3358         if (ret)
3359                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3360                            action, ret);
3361 
3362         return NOTIFY_STOP;
3363 }
3364 
3365 /* Alloc HCI device */
3366 struct hci_dev *hci_alloc_dev(void)
3367 {
3368         struct hci_dev *hdev;
3369 
3370         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3371         if (!hdev)
3372                 return NULL;
3373 
3374         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3375         hdev->esco_type = (ESCO_HV1);
3376         hdev->link_mode = (HCI_LM_ACCEPT);
3377         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3378         hdev->io_capability = 0x03;     /* No Input No Output */
3379         hdev->manufacturer = 0xffff;    /* Default to internal use */
3380         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3381         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3382         hdev->adv_instance_cnt = 0;
3383         hdev->cur_adv_instance = 0x00;
3384         hdev->adv_instance_timeout = 0;
3385 
3386         hdev->sniff_max_interval = 800;
3387         hdev->sniff_min_interval = 80;
3388 
3389         hdev->le_adv_channel_map = 0x07;
3390         hdev->le_adv_min_interval = 0x0800;
3391         hdev->le_adv_max_interval = 0x0800;
3392         hdev->le_scan_interval = 0x0060;
3393         hdev->le_scan_window = 0x0030;
3394         hdev->le_conn_min_interval = 0x0018;
3395         hdev->le_conn_max_interval = 0x0028;
3396         hdev->le_conn_latency = 0x0000;
3397         hdev->le_supv_timeout = 0x002a;
3398         hdev->le_def_tx_len = 0x001b;
3399         hdev->le_def_tx_time = 0x0148;
3400         hdev->le_max_tx_len = 0x001b;
3401         hdev->le_max_tx_time = 0x0148;
3402         hdev->le_max_rx_len = 0x001b;
3403         hdev->le_max_rx_time = 0x0148;
3404         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3405         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3406         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3407         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3408         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3409 
3410         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3411         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3412         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3413         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3414         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3415         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3416 
3417         mutex_init(&hdev->lock);
3418         mutex_init(&hdev->req_lock);
3419 
3420         INIT_LIST_HEAD(&hdev->mgmt_pending);
3421         INIT_LIST_HEAD(&hdev->blacklist);
3422         INIT_LIST_HEAD(&hdev->whitelist);
3423         INIT_LIST_HEAD(&hdev->wakeable);
3424         INIT_LIST_HEAD(&hdev->uuids);
3425         INIT_LIST_HEAD(&hdev->link_keys);
3426         INIT_LIST_HEAD(&hdev->long_term_keys);
3427         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3428         INIT_LIST_HEAD(&hdev->remote_oob_data);
3429         INIT_LIST_HEAD(&hdev->le_white_list);
3430         INIT_LIST_HEAD(&hdev->le_resolv_list);
3431         INIT_LIST_HEAD(&hdev->le_conn_params);
3432         INIT_LIST_HEAD(&hdev->pend_le_conns);
3433         INIT_LIST_HEAD(&hdev->pend_le_reports);
3434         INIT_LIST_HEAD(&hdev->conn_hash.list);
3435         INIT_LIST_HEAD(&hdev->adv_instances);
3436         INIT_LIST_HEAD(&hdev->blocked_keys);
3437 
3438         INIT_WORK(&hdev->rx_work, hci_rx_work);
3439         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3440         INIT_WORK(&hdev->tx_work, hci_tx_work);
3441         INIT_WORK(&hdev->power_on, hci_power_on);
3442         INIT_WORK(&hdev->error_reset, hci_error_reset);
3443         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3444 
3445         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3446 
3447         skb_queue_head_init(&hdev->rx_q);
3448         skb_queue_head_init(&hdev->cmd_q);
3449         skb_queue_head_init(&hdev->raw_q);
3450 
3451         init_waitqueue_head(&hdev->req_wait_q);
3452         init_waitqueue_head(&hdev->suspend_wait_q);
3453 
3454         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3455 
3456         hci_request_setup(hdev);
3457 
3458         hci_init_sysfs(hdev);
3459         discovery_init(hdev);
3460 
3461         return hdev;
3462 }
3463 EXPORT_SYMBOL(hci_alloc_dev);
3464 
3465 /* Free HCI device */
3466 void hci_free_dev(struct hci_dev *hdev)
3467 {
3468         /* will free via device release */
3469         put_device(&hdev->dev);
3470 }
3471 EXPORT_SYMBOL(hci_free_dev);
3472 
3473 /* Register HCI device */
3474 int hci_register_dev(struct hci_dev *hdev)
3475 {
3476         int id, error;
3477 
3478         if (!hdev->open || !hdev->close || !hdev->send)
3479                 return -EINVAL;
3480 
3481         /* Do not allow HCI_AMP devices to register at index 0,
3482          * so the index can be used as the AMP controller ID.
3483          */
3484         switch (hdev->dev_type) {
3485         case HCI_PRIMARY:
3486                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3487                 break;
3488         case HCI_AMP:
3489                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3490                 break;
3491         default:
3492                 return -EINVAL;
3493         }
3494 
3495         if (id < 0)
3496                 return id;
3497 
3498         sprintf(hdev->name, "hci%d", id);
3499         hdev->id = id;
3500 
3501         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3502 
3503         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3504         if (!hdev->workqueue) {
3505                 error = -ENOMEM;
3506                 goto err;
3507         }
3508 
3509         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3510                                                       hdev->name);
3511         if (!hdev->req_workqueue) {
3512                 destroy_workqueue(hdev->workqueue);
3513                 error = -ENOMEM;
3514                 goto err;
3515         }
3516 
3517         if (!IS_ERR_OR_NULL(bt_debugfs))
3518                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3519 
3520         dev_set_name(&hdev->dev, "%s", hdev->name);
3521 
3522         error = device_add(&hdev->dev);
3523         if (error < 0)
3524                 goto err_wqueue;
3525 
3526         hci_leds_init(hdev);
3527 
3528         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3529                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3530                                     hdev);
3531         if (hdev->rfkill) {
3532                 if (rfkill_register(hdev->rfkill) < 0) {
3533                         rfkill_destroy(hdev->rfkill);
3534                         hdev->rfkill = NULL;
3535                 }
3536         }
3537 
3538         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3539                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3540 
3541         hci_dev_set_flag(hdev, HCI_SETUP);
3542         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3543 
3544         if (hdev->dev_type == HCI_PRIMARY) {
3545                 /* Assume BR/EDR support until proven otherwise (such as
3546                  * through reading supported features during init.
3547                  */
3548                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3549         }
3550 
3551         write_lock(&hci_dev_list_lock);
3552         list_add(&hdev->list, &hci_dev_list);
3553         write_unlock(&hci_dev_list_lock);
3554 
3555         /* Devices that are marked for raw-only usage are unconfigured
3556          * and should not be included in normal operation.
3557          */
3558         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3559                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3560 
3561         hci_sock_dev_event(hdev, HCI_DEV_REG);
3562         hci_dev_hold(hdev);
3563 
3564         hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3565         error = register_pm_notifier(&hdev->suspend_notifier);
3566         if (error)
3567                 goto err_wqueue;
3568 
3569         queue_work(hdev->req_workqueue, &hdev->power_on);
3570 
3571         return id;
3572 
3573 err_wqueue:
3574         destroy_workqueue(hdev->workqueue);
3575         destroy_workqueue(hdev->req_workqueue);
3576 err:
3577         ida_simple_remove(&hci_index_ida, hdev->id);
3578 
3579         return error;
3580 }
3581 EXPORT_SYMBOL(hci_register_dev);
3582 
3583 /* Unregister HCI device */
3584 void hci_unregister_dev(struct hci_dev *hdev)
3585 {
3586         int id;
3587 
3588         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3589 
3590         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3591 
3592         id = hdev->id;
3593 
3594         write_lock(&hci_dev_list_lock);
3595         list_del(&hdev->list);
3596         write_unlock(&hci_dev_list_lock);
3597 
3598         cancel_work_sync(&hdev->power_on);
3599 
3600         unregister_pm_notifier(&hdev->suspend_notifier);
3601         cancel_work_sync(&hdev->suspend_prepare);
3602 
3603         hci_dev_do_close(hdev);
3604 
3605         if (!test_bit(HCI_INIT, &hdev->flags) &&
3606             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3607             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3608                 hci_dev_lock(hdev);
3609                 mgmt_index_removed(hdev);
3610                 hci_dev_unlock(hdev);
3611         }
3612 
3613         /* mgmt_index_removed should take care of emptying the
3614          * pending list */
3615         BUG_ON(!list_empty(&hdev->mgmt_pending));
3616 
3617         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3618 
3619         if (hdev->rfkill) {
3620                 rfkill_unregister(hdev->rfkill);
3621                 rfkill_destroy(hdev->rfkill);
3622         }
3623 
3624         device_del(&hdev->dev);
3625 
3626         debugfs_remove_recursive(hdev->debugfs);
3627         kfree_const(hdev->hw_info);
3628         kfree_const(hdev->fw_info);
3629 
3630         destroy_workqueue(hdev->workqueue);
3631         destroy_workqueue(hdev->req_workqueue);
3632 
3633         hci_dev_lock(hdev);
3634         hci_bdaddr_list_clear(&hdev->blacklist);
3635         hci_bdaddr_list_clear(&hdev->whitelist);
3636         hci_uuids_clear(hdev);
3637         hci_link_keys_clear(hdev);
3638         hci_smp_ltks_clear(hdev);
3639         hci_smp_irks_clear(hdev);
3640         hci_remote_oob_data_clear(hdev);
3641         hci_adv_instances_clear(hdev);
3642         hci_bdaddr_list_clear(&hdev->le_white_list);
3643         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3644         hci_conn_params_clear_all(hdev);
3645         hci_discovery_filter_clear(hdev);
3646         hci_blocked_keys_clear(hdev);
3647         hci_dev_unlock(hdev);
3648 
3649         hci_dev_put(hdev);
3650 
3651         ida_simple_remove(&hci_index_ida, id);
3652 }
3653 EXPORT_SYMBOL(hci_unregister_dev);
3654 
3655 /* Suspend HCI device */
3656 int hci_suspend_dev(struct hci_dev *hdev)
3657 {
3658         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3659         return 0;
3660 }
3661 EXPORT_SYMBOL(hci_suspend_dev);
3662 
3663 /* Resume HCI device */
3664 int hci_resume_dev(struct hci_dev *hdev)
3665 {
3666         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3667         return 0;
3668 }
3669 EXPORT_SYMBOL(hci_resume_dev);
3670 
3671 /* Reset HCI device */
3672 int hci_reset_dev(struct hci_dev *hdev)
3673 {
3674         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3675         struct sk_buff *skb;
3676 
3677         skb = bt_skb_alloc(3, GFP_ATOMIC);
3678         if (!skb)
3679                 return -ENOMEM;
3680 
3681         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3682         skb_put_data(skb, hw_err, 3);
3683 
3684         /* Send Hardware Error to upper stack */
3685         return hci_recv_frame(hdev, skb);
3686 }
3687 EXPORT_SYMBOL(hci_reset_dev);
3688 
3689 /* Receive frame from HCI drivers */
3690 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3691 {
3692         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3693                       && !test_bit(HCI_INIT, &hdev->flags))) {
3694                 kfree_skb(skb);
3695                 return -ENXIO;
3696         }
3697 
3698         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3699             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3700             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3701             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3702                 kfree_skb(skb);
3703                 return -EINVAL;
3704         }
3705 
3706         /* Incoming skb */
3707         bt_cb(skb)->incoming = 1;
3708 
3709         /* Time stamp */
3710         __net_timestamp(skb);
3711 
3712         skb_queue_tail(&hdev->rx_q, skb);
3713         queue_work(hdev->workqueue, &hdev->rx_work);
3714 
3715         return 0;
3716 }
3717 EXPORT_SYMBOL(hci_recv_frame);
3718 
3719 /* Receive diagnostic message from HCI drivers */
3720 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3721 {
3722         /* Mark as diagnostic packet */
3723         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3724 
3725         /* Time stamp */
3726         __net_timestamp(skb);
3727 
3728         skb_queue_tail(&hdev->rx_q, skb);
3729         queue_work(hdev->workqueue, &hdev->rx_work);
3730 
3731         return 0;
3732 }
3733 EXPORT_SYMBOL(hci_recv_diag);
3734 
3735 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3736 {
3737         va_list vargs;
3738 
3739         va_start(vargs, fmt);
3740         kfree_const(hdev->hw_info);
3741         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3742         va_end(vargs);
3743 }
3744 EXPORT_SYMBOL(hci_set_hw_info);
3745 
3746 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3747 {
3748         va_list vargs;
3749 
3750         va_start(vargs, fmt);
3751         kfree_const(hdev->fw_info);
3752         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3753         va_end(vargs);
3754 }
3755 EXPORT_SYMBOL(hci_set_fw_info);
3756 
3757 /* ---- Interface to upper protocols ---- */
3758 
3759 int hci_register_cb(struct hci_cb *cb)
3760 {
3761         BT_DBG("%p name %s", cb, cb->name);
3762 
3763         mutex_lock(&hci_cb_list_lock);
3764         list_add_tail(&cb->list, &hci_cb_list);
3765         mutex_unlock(&hci_cb_list_lock);
3766 
3767         return 0;
3768 }
3769 EXPORT_SYMBOL(hci_register_cb);
3770 
3771 int hci_unregister_cb(struct hci_cb *cb)
3772 {
3773         BT_DBG("%p name %s", cb, cb->name);
3774 
3775         mutex_lock(&hci_cb_list_lock);
3776         list_del(&cb->list);
3777         mutex_unlock(&hci_cb_list_lock);
3778 
3779         return 0;
3780 }
3781 EXPORT_SYMBOL(hci_unregister_cb);
3782 
3783 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3784 {
3785         int err;
3786 
3787         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3788                skb->len);
3789 
3790         /* Time stamp */
3791         __net_timestamp(skb);
3792 
3793         /* Send copy to monitor */
3794         hci_send_to_monitor(hdev, skb);
3795 
3796         if (atomic_read(&hdev->promisc)) {
3797                 /* Send copy to the sockets */
3798                 hci_send_to_sock(hdev, skb);
3799         }
3800 
3801         /* Get rid of skb owner, prior to sending to the driver. */
3802         skb_orphan(skb);
3803 
3804         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3805                 kfree_skb(skb);
3806                 return;
3807         }
3808 
3809         err = hdev->send(hdev, skb);
3810         if (err < 0) {
3811                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3812                 kfree_skb(skb);
3813         }
3814 }
3815 
3816 /* Send HCI command */
3817 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3818                  const void *param)
3819 {
3820         struct sk_buff *skb;
3821 
3822         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3823 
3824         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3825         if (!skb) {
3826                 bt_dev_err(hdev, "no memory for command");
3827                 return -ENOMEM;
3828         }
3829 
3830         /* Stand-alone HCI commands must be flagged as
3831          * single-command requests.
3832          */
3833         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3834 
3835         skb_queue_tail(&hdev->cmd_q, skb);
3836         queue_work(hdev->workqueue, &hdev->cmd_work);
3837 
3838         return 0;
3839 }
3840 
3841 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3842                    const void *param)
3843 {
3844         struct sk_buff *skb;
3845 
3846         if (hci_opcode_ogf(opcode) != 0x3f) {
3847                 /* A controller receiving a command shall respond with either
3848                  * a Command Status Event or a Command Complete Event.
3849                  * Therefore, all standard HCI commands must be sent via the
3850                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3851                  * Some vendors do not comply with this rule for vendor-specific
3852                  * commands and do not return any event. We want to support
3853                  * unresponded commands for such cases only.
3854                  */
3855                 bt_dev_err(hdev, "unresponded command not supported");
3856                 return -EINVAL;
3857         }
3858 
3859         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3860         if (!skb) {
3861                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3862                            opcode);
3863                 return -ENOMEM;
3864         }
3865 
3866         hci_send_frame(hdev, skb);
3867 
3868         return 0;
3869 }
3870 EXPORT_SYMBOL(__hci_cmd_send);
3871 
3872 /* Get data from the previously sent command */
3873 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3874 {
3875         struct hci_command_hdr *hdr;
3876 
3877         if (!hdev->sent_cmd)
3878                 return NULL;
3879 
3880         hdr = (void *) hdev->sent_cmd->data;
3881 
3882         if (hdr->opcode != cpu_to_le16(opcode))
3883                 return NULL;
3884 
3885         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3886 
3887         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3888 }
3889 
3890 /* Send HCI command and wait for command commplete event */
3891 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3892                              const void *param, u32 timeout)
3893 {
3894         struct sk_buff *skb;
3895 
3896         if (!test_bit(HCI_UP, &hdev->flags))
3897                 return ERR_PTR(-ENETDOWN);
3898 
3899         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3900 
3901         hci_req_sync_lock(hdev);
3902         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3903         hci_req_sync_unlock(hdev);
3904 
3905         return skb;
3906 }
3907 EXPORT_SYMBOL(hci_cmd_sync);
3908 
3909 /* Send ACL data */
3910 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3911 {
3912         struct hci_acl_hdr *hdr;
3913         int len = skb->len;
3914 
3915         skb_push(skb, HCI_ACL_HDR_SIZE);
3916         skb_reset_transport_header(skb);
3917         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3918         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3919         hdr->dlen   = cpu_to_le16(len);
3920 }
3921 
3922 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3923                           struct sk_buff *skb, __u16 flags)
3924 {
3925         struct hci_conn *conn = chan->conn;
3926         struct hci_dev *hdev = conn->hdev;
3927         struct sk_buff *list;
3928 
3929         skb->len = skb_headlen(skb);
3930         skb->data_len = 0;
3931 
3932         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3933 
3934         switch (hdev->dev_type) {
3935         case HCI_PRIMARY:
3936                 hci_add_acl_hdr(skb, conn->handle, flags);
3937                 break;
3938         case HCI_AMP:
3939                 hci_add_acl_hdr(skb, chan->handle, flags);
3940                 break;
3941         default:
3942                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3943                 return;
3944         }
3945 
3946         list = skb_shinfo(skb)->frag_list;
3947         if (!list) {
3948                 /* Non fragmented */
3949                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3950 
3951                 skb_queue_tail(queue, skb);
3952         } else {
3953                 /* Fragmented */
3954                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3955 
3956                 skb_shinfo(skb)->frag_list = NULL;
3957 
3958                 /* Queue all fragments atomically. We need to use spin_lock_bh
3959                  * here because of 6LoWPAN links, as there this function is
3960                  * called from softirq and using normal spin lock could cause
3961                  * deadlocks.
3962                  */
3963                 spin_lock_bh(&queue->lock);
3964 
3965                 __skb_queue_tail(queue, skb);
3966 
3967                 flags &= ~ACL_START;
3968                 flags |= ACL_CONT;
3969                 do {
3970                         skb = list; list = list->next;
3971 
3972                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3973                         hci_add_acl_hdr(skb, conn->handle, flags);
3974 
3975                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3976 
3977                         __skb_queue_tail(queue, skb);
3978                 } while (list);
3979 
3980                 spin_unlock_bh(&queue->lock);
3981         }
3982 }
3983 
3984 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3985 {
3986         struct hci_dev *hdev = chan->conn->hdev;
3987 
3988         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3989 
3990         hci_queue_acl(chan, &chan->data_q, skb, flags);
3991 
3992         queue_work(hdev->workqueue, &hdev->tx_work);
3993 }
3994 
3995 /* Send SCO data */
3996 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3997 {
3998         struct hci_dev *hdev = conn->hdev;
3999         struct hci_sco_hdr hdr;
4000 
4001         BT_DBG("%s len %d", hdev->name, skb->len);
4002 
4003         hdr.handle = cpu_to_le16(conn->handle);
4004         hdr.dlen   = skb->len;
4005 
4006         skb_push(skb, HCI_SCO_HDR_SIZE);
4007         skb_reset_transport_header(skb);
4008         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4009 
4010         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4011 
4012         skb_queue_tail(&conn->data_q, skb);
4013         queue_work(hdev->workqueue, &hdev->tx_work);
4014 }
4015 
4016 /* ---- HCI TX task (outgoing data) ---- */
4017 
4018 /* HCI Connection scheduler */
4019 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4020                                      int *quote)
4021 {
4022         struct hci_conn_hash *h = &hdev->conn_hash;
4023         struct hci_conn *conn = NULL, *c;
4024         unsigned int num = 0, min = ~0;
4025 
4026         /* We don't have to lock device here. Connections are always
4027          * added and removed with TX task disabled. */
4028 
4029         rcu_read_lock();
4030 
4031         list_for_each_entry_rcu(c, &h->list, list) {
4032                 if (c->type != type || skb_queue_empty(&c->data_q))
4033                         continue;
4034 
4035                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4036                         continue;
4037 
4038                 num++;
4039 
4040                 if (c->sent < min) {
4041                         min  = c->sent;
4042                         conn = c;
4043                 }
4044 
4045                 if (hci_conn_num(hdev, type) == num)
4046                         break;
4047         }
4048 
4049         rcu_read_unlock();
4050 
4051         if (conn) {
4052                 int cnt, q;
4053 
4054                 switch (conn->type) {
4055                 case ACL_LINK:
4056                         cnt = hdev->acl_cnt;
4057                         break;
4058                 case SCO_LINK:
4059                 case ESCO_LINK:
4060                         cnt = hdev->sco_cnt;
4061                         break;
4062                 case LE_LINK:
4063                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4064                         break;
4065                 default:
4066                         cnt = 0;
4067                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4068                 }
4069 
4070                 q = cnt / num;
4071                 *quote = q ? q : 1;
4072         } else
4073                 *quote = 0;
4074 
4075         BT_DBG("conn %p quote %d", conn, *quote);
4076         return conn;
4077 }
4078 
4079 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4080 {
4081         struct hci_conn_hash *h = &hdev->conn_hash;
4082         struct hci_conn *c;
4083 
4084         bt_dev_err(hdev, "link tx timeout");
4085 
4086         rcu_read_lock();
4087 
4088         /* Kill stalled connections */
4089         list_for_each_entry_rcu(c, &h->list, list) {
4090                 if (c->type == type && c->sent) {
4091                         bt_dev_err(hdev, "killing stalled connection %pMR",
4092                                    &c->dst);
4093                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4094                 }
4095         }
4096 
4097         rcu_read_unlock();
4098 }
4099 
4100 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4101                                       int *quote)
4102 {
4103         struct hci_conn_hash *h = &hdev->conn_hash;
4104         struct hci_chan *chan = NULL;
4105         unsigned int num = 0, min = ~0, cur_prio = 0;
4106         struct hci_conn *conn;
4107         int cnt, q, conn_num = 0;
4108 
4109         BT_DBG("%s", hdev->name);
4110 
4111         rcu_read_lock();
4112 
4113         list_for_each_entry_rcu(conn, &h->list, list) {
4114                 struct hci_chan *tmp;
4115 
4116                 if (conn->type != type)
4117                         continue;
4118 
4119                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4120                         continue;
4121 
4122                 conn_num++;
4123 
4124                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4125                         struct sk_buff *skb;
4126 
4127                         if (skb_queue_empty(&tmp->data_q))
4128                                 continue;
4129 
4130                         skb = skb_peek(&tmp->data_q);
4131                         if (skb->priority < cur_prio)
4132                                 continue;
4133 
4134                         if (skb->priority > cur_prio) {
4135                                 num = 0;
4136                                 min = ~0;
4137                                 cur_prio = skb->priority;
4138                         }
4139 
4140                         num++;
4141 
4142                         if (conn->sent < min) {
4143                                 min  = conn->sent;
4144                                 chan = tmp;
4145                         }
4146                 }
4147 
4148                 if (hci_conn_num(hdev, type) == conn_num)
4149                         break;
4150         }
4151 
4152         rcu_read_unlock();
4153 
4154         if (!chan)
4155                 return NULL;
4156 
4157         switch (chan->conn->type) {
4158         case ACL_LINK:
4159                 cnt = hdev->acl_cnt;
4160                 break;
4161         case AMP_LINK:
4162                 cnt = hdev->block_cnt;
4163                 break;
4164         case SCO_LINK:
4165         case ESCO_LINK:
4166                 cnt = hdev->sco_cnt;
4167                 break;
4168         case LE_LINK:
4169                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4170                 break;
4171         default:
4172                 cnt = 0;
4173                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4174         }
4175 
4176         q = cnt / num;
4177         *quote = q ? q : 1;
4178         BT_DBG("chan %p quote %d", chan, *quote);
4179         return chan;
4180 }
4181 
4182 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4183 {
4184         struct hci_conn_hash *h = &hdev->conn_hash;
4185         struct hci_conn *conn;
4186         int num = 0;
4187 
4188         BT_DBG("%s", hdev->name);
4189 
4190         rcu_read_lock();
4191 
4192         list_for_each_entry_rcu(conn, &h->list, list) {
4193                 struct hci_chan *chan;
4194 
4195                 if (conn->type != type)
4196                         continue;
4197 
4198                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4199                         continue;
4200 
4201                 num++;
4202 
4203                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4204                         struct sk_buff *skb;
4205 
4206                         if (chan->sent) {
4207                                 chan->sent = 0;
4208                                 continue;
4209                         }
4210 
4211                         if (skb_queue_empty(&chan->data_q))
4212                                 continue;
4213 
4214                         skb = skb_peek(&chan->data_q);
4215                         if (skb->priority >= HCI_PRIO_MAX - 1)
4216                                 continue;
4217 
4218                         skb->priority = HCI_PRIO_MAX - 1;
4219 
4220                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4221                                skb->priority);
4222                 }
4223 
4224                 if (hci_conn_num(hdev, type) == num)
4225                         break;
4226         }
4227 
4228         rcu_read_unlock();
4229 
4230 }
4231 
4232 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4233 {
4234         /* Calculate count of blocks used by this packet */
4235         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4236 }
4237 
4238 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4239 {
4240         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4241                 /* ACL tx timeout must be longer than maximum
4242                  * link supervision timeout (40.9 seconds) */
4243                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4244                                        HCI_ACL_TX_TIMEOUT))
4245                         hci_link_tx_to(hdev, ACL_LINK);
4246         }
4247 }
4248 
4249 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4250 {
4251         unsigned int cnt = hdev->acl_cnt;
4252         struct hci_chan *chan;
4253         struct sk_buff *skb;
4254         int quote;
4255 
4256         __check_timeout(hdev, cnt);
4257 
4258         while (hdev->acl_cnt &&
4259                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4260                 u32 priority = (skb_peek(&chan->data_q))->priority;
4261                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4262                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4263                                skb->len, skb->priority);
4264 
4265                         /* Stop if priority has changed */
4266                         if (skb->priority < priority)
4267                                 break;
4268 
4269                         skb = skb_dequeue(&chan->data_q);
4270 
4271                         hci_conn_enter_active_mode(chan->conn,
4272                                                    bt_cb(skb)->force_active);
4273 
4274                         hci_send_frame(hdev, skb);
4275                         hdev->acl_last_tx = jiffies;
4276 
4277                         hdev->acl_cnt--;
4278                         chan->sent++;
4279                         chan->conn->sent++;
4280                 }
4281         }
4282 
4283         if (cnt != hdev->acl_cnt)
4284                 hci_prio_recalculate(hdev, ACL_LINK);
4285 }
4286 
4287 static void hci_sched_acl_blk(struct hci_dev *hdev)
4288 {
4289         unsigned int cnt = hdev->block_cnt;
4290         struct hci_chan *chan;
4291         struct sk_buff *skb;
4292         int quote;
4293         u8 type;
4294 
4295         __check_timeout(hdev, cnt);
4296 
4297         BT_DBG("%s", hdev->name);
4298 
4299         if (hdev->dev_type == HCI_AMP)
4300                 type = AMP_LINK;
4301         else
4302                 type = ACL_LINK;
4303 
4304         while (hdev->block_cnt > 0 &&
4305                (chan = hci_chan_sent(hdev, type, &quote))) {
4306                 u32 priority = (skb_peek(&chan->data_q))->priority;
4307                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4308                         int blocks;
4309 
4310                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4311                                skb->len, skb->priority);
4312 
4313                         /* Stop if priority has changed */
4314                         if (skb->priority < priority)
4315                                 break;
4316 
4317                         skb = skb_dequeue(&chan->data_q);
4318 
4319                         blocks = __get_blocks(hdev, skb);
4320                         if (blocks > hdev->block_cnt)
4321                                 return;
4322 
4323                         hci_conn_enter_active_mode(chan->conn,
4324                                                    bt_cb(skb)->force_active);
4325 
4326                         hci_send_frame(hdev, skb);
4327                         hdev->acl_last_tx = jiffies;
4328 
4329                         hdev->block_cnt -= blocks;
4330                         quote -= blocks;
4331 
4332                         chan->sent += blocks;
4333                         chan->conn->sent += blocks;
4334                 }
4335         }
4336 
4337         if (cnt != hdev->block_cnt)
4338                 hci_prio_recalculate(hdev, type);
4339 }
4340 
4341 static void hci_sched_acl(struct hci_dev *hdev)
4342 {
4343         BT_DBG("%s", hdev->name);
4344 
4345         /* No ACL link over BR/EDR controller */
4346         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4347                 return;
4348 
4349         /* No AMP link over AMP controller */
4350         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4351                 return;
4352 
4353         switch (hdev->flow_ctl_mode) {
4354         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4355                 hci_sched_acl_pkt(hdev);
4356                 break;
4357 
4358         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4359                 hci_sched_acl_blk(hdev);
4360                 break;
4361         }
4362 }
4363 
4364 /* Schedule SCO */
4365 static void hci_sched_sco(struct hci_dev *hdev)
4366 {
4367         struct hci_conn *conn;
4368         struct sk_buff *skb;
4369         int quote;
4370 
4371         BT_DBG("%s", hdev->name);
4372 
4373         if (!hci_conn_num(hdev, SCO_LINK))
4374                 return;
4375 
4376         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4377                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4378                         BT_DBG("skb %p len %d", skb, skb->len);
4379                         hci_send_frame(hdev, skb);
4380 
4381                         conn->sent++;
4382                         if (conn->sent == ~0)
4383                                 conn->sent = 0;
4384                 }
4385         }
4386 }
4387 
4388 static void hci_sched_esco(struct hci_dev *hdev)
4389 {
4390         struct hci_conn *conn;
4391         struct sk_buff *skb;
4392         int quote;
4393 
4394         BT_DBG("%s", hdev->name);
4395 
4396         if (!hci_conn_num(hdev, ESCO_LINK))
4397                 return;
4398 
4399         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4400                                                      &quote))) {
4401                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4402                         BT_DBG("skb %p len %d", skb, skb->len);
4403                         hci_send_frame(hdev, skb);
4404 
4405                         conn->sent++;
4406                         if (conn->sent == ~0)
4407                                 conn->sent = 0;
4408                 }
4409         }
4410 }
4411 
4412 static void hci_sched_le(struct hci_dev *hdev)
4413 {
4414         struct hci_chan *chan;
4415         struct sk_buff *skb;
4416         int quote, cnt, tmp;
4417 
4418         BT_DBG("%s", hdev->name);
4419 
4420         if (!hci_conn_num(hdev, LE_LINK))
4421                 return;
4422 
4423         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4424 
4425         __check_timeout(hdev, cnt);
4426 
4427         tmp = cnt;
4428         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4429                 u32 priority = (skb_peek(&chan->data_q))->priority;
4430                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4431                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4432                                skb->len, skb->priority);
4433 
4434                         /* Stop if priority has changed */
4435                         if (skb->priority < priority)
4436                                 break;
4437 
4438                         skb = skb_dequeue(&chan->data_q);
4439 
4440                         hci_send_frame(hdev, skb);
4441                         hdev->le_last_tx = jiffies;
4442 
4443                         cnt--;
4444                         chan->sent++;
4445                         chan->conn->sent++;
4446                 }
4447         }
4448 
4449         if (hdev->le_pkts)
4450                 hdev->le_cnt = cnt;
4451         else
4452                 hdev->acl_cnt = cnt;
4453 
4454         if (cnt != tmp)
4455                 hci_prio_recalculate(hdev, LE_LINK);
4456 }
4457 
4458 static void hci_tx_work(struct work_struct *work)
4459 {
4460         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4461         struct sk_buff *skb;
4462 
4463         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4464                hdev->sco_cnt, hdev->le_cnt);
4465 
4466         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4467                 /* Schedule queues and send stuff to HCI driver */
4468                 hci_sched_acl(hdev);
4469                 hci_sched_sco(hdev);
4470                 hci_sched_esco(hdev);
4471                 hci_sched_le(hdev);
4472         }
4473 
4474         /* Send next queued raw (unknown type) packet */
4475         while ((skb = skb_dequeue(&hdev->raw_q)))
4476                 hci_send_frame(hdev, skb);
4477 }
4478 
4479 /* ----- HCI RX task (incoming data processing) ----- */
4480 
4481 /* ACL data packet */
4482 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4483 {
4484         struct hci_acl_hdr *hdr = (void *) skb->data;
4485         struct hci_conn *conn;
4486         __u16 handle, flags;
4487 
4488         skb_pull(skb, HCI_ACL_HDR_SIZE);
4489 
4490         handle = __le16_to_cpu(hdr->handle);
4491         flags  = hci_flags(handle);
4492         handle = hci_handle(handle);
4493 
4494         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4495                handle, flags);
4496 
4497         hdev->stat.acl_rx++;
4498 
4499         hci_dev_lock(hdev);
4500         conn = hci_conn_hash_lookup_handle(hdev, handle);
4501         hci_dev_unlock(hdev);
4502 
4503         if (conn) {
4504                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4505 
4506                 /* Send to upper protocol */
4507                 l2cap_recv_acldata(conn, skb, flags);
4508                 return;
4509         } else {
4510                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4511                            handle);
4512         }
4513 
4514         kfree_skb(skb);
4515 }
4516 
4517 /* SCO data packet */
4518 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4519 {
4520         struct hci_sco_hdr *hdr = (void *) skb->data;
4521         struct hci_conn *conn;
4522         __u16 handle, flags;
4523 
4524         skb_pull(skb, HCI_SCO_HDR_SIZE);
4525 
4526         handle = __le16_to_cpu(hdr->handle);
4527         flags  = hci_flags(handle);
4528         handle = hci_handle(handle);
4529 
4530         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4531                handle, flags);
4532 
4533         hdev->stat.sco_rx++;
4534 
4535         hci_dev_lock(hdev);
4536         conn = hci_conn_hash_lookup_handle(hdev, handle);
4537         hci_dev_unlock(hdev);
4538 
4539         if (conn) {
4540                 /* Send to upper protocol */
4541                 sco_recv_scodata(conn, skb);
4542                 return;
4543         } else {
4544                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4545                            handle);
4546         }
4547 
4548         kfree_skb(skb);
4549 }
4550 
4551 static bool hci_req_is_complete(struct hci_dev *hdev)
4552 {
4553         struct sk_buff *skb;
4554 
4555         skb = skb_peek(&hdev->cmd_q);
4556         if (!skb)
4557                 return true;
4558 
4559         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4560 }
4561 
4562 static void hci_resend_last(struct hci_dev *hdev)
4563 {
4564         struct hci_command_hdr *sent;
4565         struct sk_buff *skb;
4566         u16 opcode;
4567 
4568         if (!hdev->sent_cmd)
4569                 return;
4570 
4571         sent = (void *) hdev->sent_cmd->data;
4572         opcode = __le16_to_cpu(sent->opcode);
4573         if (opcode == HCI_OP_RESET)
4574                 return;
4575 
4576         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4577         if (!skb)
4578                 return;
4579 
4580         skb_queue_head(&hdev->cmd_q, skb);
4581         queue_work(hdev->workqueue, &hdev->cmd_work);
4582 }
4583 
4584 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4585                           hci_req_complete_t *req_complete,
4586                           hci_req_complete_skb_t *req_complete_skb)
4587 {
4588         struct sk_buff *skb;
4589         unsigned long flags;
4590 
4591         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4592 
4593         /* If the completed command doesn't match the last one that was
4594          * sent we need to do special handling of it.
4595          */
4596         if (!hci_sent_cmd_data(hdev, opcode)) {
4597                 /* Some CSR based controllers generate a spontaneous
4598                  * reset complete event during init and any pending
4599                  * command will never be completed. In such a case we
4600                  * need to resend whatever was the last sent
4601                  * command.
4602                  */
4603                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4604                         hci_resend_last(hdev);
4605 
4606                 return;
4607         }
4608 
4609         /* If we reach this point this event matches the last command sent */
4610         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4611 
4612         /* If the command succeeded and there's still more commands in
4613          * this request the request is not yet complete.
4614          */
4615         if (!status && !hci_req_is_complete(hdev))
4616                 return;
4617 
4618         /* If this was the last command in a request the complete
4619          * callback would be found in hdev->sent_cmd instead of the
4620          * command queue (hdev->cmd_q).
4621          */
4622         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4623                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4624                 return;
4625         }
4626 
4627         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4628                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4629                 return;
4630         }
4631 
4632         /* Remove all pending commands belonging to this request */
4633         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4634         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4635                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4636                         __skb_queue_head(&hdev->cmd_q, skb);
4637                         break;
4638                 }
4639 
4640                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4641                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4642                 else
4643                         *req_complete = bt_cb(skb)->hci.req_complete;
4644                 kfree_skb(skb);
4645         }
4646         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4647 }
4648 
4649 static void hci_rx_work(struct work_struct *work)
4650 {
4651         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4652         struct sk_buff *skb;
4653 
4654         BT_DBG("%s", hdev->name);
4655 
4656         while ((skb = skb_dequeue(&hdev->rx_q))) {
4657                 /* Send copy to monitor */
4658                 hci_send_to_monitor(hdev, skb);
4659 
4660                 if (atomic_read(&hdev->promisc)) {
4661                         /* Send copy to the sockets */
4662                         hci_send_to_sock(hdev, skb);
4663                 }
4664 
4665                 /* If the device has been opened in HCI_USER_CHANNEL,
4666                  * the userspace has exclusive access to device.
4667                  * When device is HCI_INIT, we still need to process
4668                  * the data packets to the driver in order
4669                  * to complete its setup().
4670                  */
4671                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4672                     !test_bit(HCI_INIT, &hdev->flags)) {
4673                         kfree_skb(skb);
4674                         continue;
4675                 }
4676 
4677                 if (test_bit(HCI_INIT, &hdev->flags)) {
4678                         /* Don't process data packets in this states. */
4679                         switch (hci_skb_pkt_type(skb)) {
4680                         case HCI_ACLDATA_PKT:
4681                         case HCI_SCODATA_PKT:
4682                         case HCI_ISODATA_PKT:
4683                                 kfree_skb(skb);
4684                                 continue;
4685                         }
4686                 }
4687 
4688                 /* Process frame */
4689                 switch (hci_skb_pkt_type(skb)) {
4690                 case HCI_EVENT_PKT:
4691                         BT_DBG("%s Event packet", hdev->name);
4692                         hci_event_packet(hdev, skb);
4693                         break;
4694 
4695                 case HCI_ACLDATA_PKT:
4696                         BT_DBG("%s ACL data packet", hdev->name);
4697                         hci_acldata_packet(hdev, skb);
4698                         break;
4699 
4700                 case HCI_SCODATA_PKT:
4701                         BT_DBG("%s SCO data packet", hdev->name);
4702                         hci_scodata_packet(hdev, skb);
4703                         break;
4704 
4705                 default:
4706                         kfree_skb(skb);
4707                         break;
4708                 }
4709         }
4710 }
4711 
4712 static void hci_cmd_work(struct work_struct *work)
4713 {
4714         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4715         struct sk_buff *skb;
4716 
4717         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4718                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4719 
4720         /* Send queued commands */
4721         if (atomic_read(&hdev->cmd_cnt)) {
4722                 skb = skb_dequeue(&hdev->cmd_q);
4723                 if (!skb)
4724                         return;
4725 
4726                 kfree_skb(hdev->sent_cmd);
4727 
4728                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4729                 if (hdev->sent_cmd) {
4730                         if (hci_req_status_pend(hdev))
4731                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4732                         atomic_dec(&hdev->cmd_cnt);
4733                         hci_send_frame(hdev, skb);
4734                         if (test_bit(HCI_RESET, &hdev->flags))
4735                                 cancel_delayed_work(&hdev->cmd_timer);
4736                         else
4737                                 schedule_delayed_work(&hdev->cmd_timer,
4738                                                       HCI_CMD_TIMEOUT);
4739                 } else {
4740                         skb_queue_head(&hdev->cmd_q, skb);
4741                         queue_work(hdev->workqueue, &hdev->cmd_work);
4742                 }
4743         }
4744 }
4745 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp