
TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c


/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
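
/* Illustrative shell usage for the dut_mode attribute above (comment
 * added for clarity, not part of the original file; the path assumes
 * debugfs is mounted at /sys/kernel/debug and the controller is hci0):
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" issues HCI_OP_ENABLE_DUT_MODE; writing "N" resets the
 * controller, which is the only way out of Device Under Test mode.
 */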

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
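
/* Illustrative caller sketch for __hci_cmd_sync() (comment added for
 * clarity, not part of the original file): send one command, block
 * until its completion event arrives, and consume the returned event
 * parameters. The caller owns the skb and must free it.
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      // skb->data[0] is the status byte, the payload follows it
 *      kfree_skb(skb);
 */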

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
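
/* Illustrative use of hci_req_sync() (comment added for clarity, not
 * part of the original file): the callback builds the request and
 * hci_req_sync() runs it and waits. For example, turning page and
 * inquiry scan on via hci_scan_req() (defined further below):
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 *
 * A negative return is either an errno from the request machinery or
 * a controller status translated by bt_to_errno().
 */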

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
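
/* Note on the mask layout (comment added for clarity, not part of the
 * original file): the Set Event Mask bit for HCI event code E is bit
 * (E - 1), i.e. byte (E - 1) / 8, bit (E - 1) % 8. For example,
 * Disconnection Complete has event code 0x05, so it maps to
 * events[0] |= 0x10 as seen above.
 */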

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
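
/* Summary of the staged bring-up above (comment added for clarity,
 * not part of the original file):
 *
 *      stage 1 (hci_init1_req): reset plus basic identity (features,
 *              version, BD address)
 *      stage 2 (hci_init2_req): capability-dependent setup (buffer
 *              sizes, SSP/EIR, inquiry mode, LE basics)
 *      stage 3 (hci_init3_req): event masks, link policy, LE event
 *              mask and host support
 *      stage 4 (hci_init4_req): optional commands gated on the
 *              supported-commands bitmask
 *
 * Each stage is a synchronous request, so stage N+1 can rely on the
 * capability data read in stage N.
 */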

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
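
/* The scan parameter above is a bitmask (comment added for clarity):
 * bit 0 (SCAN_INQUIRY) enables inquiry scan (discoverable), bit 1
 * (SCAN_PAGE) enables page scan (connectable), and 0x00
 * (SCAN_DISABLED) turns both off.
 */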

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
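
/* Illustrative hold/put pairing for hci_dev_get() (comment added for
 * clarity, not part of the original file): every successful lookup
 * must be balanced with hci_dev_put() once the caller is done.
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (!hdev)
 *              return -ENODEV;
 *      // ... use hdev ...
 *      hci_dev_put(hdev);
 */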

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
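
/* Note on the ordering above (comment added for clarity, not part of
 * the original file): RSSI is a negative dBm value, so a smaller
 * abs() means a stronger signal. The walk stops at the first entry
 * that is not name-pending and has a weaker-or-equal signal, keeping
 * the resolve list sorted so that the strongest (closest) devices get
 * their names resolved first. For example, an entry with rssi -40 is
 * re-inserted ahead of one with rssi -70.
 */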

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
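
/* Illustrative userspace counterpart for hci_inquiry() (comment added
 * for clarity, not part of the original file): the HCIINQUIRY ioctl on
 * a raw HCI socket carries a struct hci_inquiry_req followed by room
 * for the inquiry_info results.
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } req = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *                        .lap = { 0x33, 0x8b, 0x9e },
 *                        .flags = IREQ_CACHE_FLUSH } };
 *
 *      ioctl(hci_sock_fd, HCIINQUIRY, &req);
 *
 * 0x9e8b33 is the General Inquiry Access Code; length is in units of
 * roughly 1.28 seconds.
 */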

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
1494 
1495 /* ---- HCI ioctl helpers ---- */
1496 
1497 int hci_dev_open(__u16 dev)
1498 {
1499         struct hci_dev *hdev;
1500         int err;
1501 
1502         hdev = hci_dev_get(dev);
1503         if (!hdev)
1504                 return -ENODEV;
1505 
1506         /* Devices that are marked as unconfigured can only be powered
1507          * up as user channel. Trying to bring them up as normal devices
1508          * will result in a failure. Only user channel operation is
1509          * possible.
1510          *
1511          * When this function is called for a user channel, the flag
1512          * HCI_USER_CHANNEL will be set first before attempting to
1513          * open the device.
1514          */
1515         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1516             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1517                 err = -EOPNOTSUPP;
1518                 goto done;
1519         }
1520 
1521         /* We need to ensure that no other power on/off work is pending
1522          * before proceeding to call hci_dev_do_open. This is
1523          * particularly important if the setup procedure has not yet
1524          * completed.
1525          */
1526         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1527                 cancel_delayed_work(&hdev->power_off);
1528 
1529         /* After this call it is guaranteed that the setup procedure
1530          * has finished. This means that error conditions like RFKILL
1531          * or a missing valid public or static random address apply.
1532          */
1533         flush_workqueue(hdev->req_workqueue);
1534 
1535         /* For controllers not using the management interface and that
1536          * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
1537          * so that pairing works for them. Once the management interface
1538          * is in use this bit will be cleared again and userspace has
1539          * to explicitly enable it.
1540          */
1541         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1542             !hci_dev_test_flag(hdev, HCI_MGMT))
1543                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1544 
1545         err = hci_dev_do_open(hdev);
1546 
1547 done:
1548         hci_dev_put(hdev);
1549         return err;
1550 }
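
     /* A minimal userspace sketch (illustrative only, not part of this file;
      * error handling trimmed): the HCIDEVUP ioctl on a raw HCI socket lands
      * in hci_dev_open() above, and HCIDEVDOWN maps to hci_dev_close() below.
      *
      *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
      *
      *	if (dd >= 0 && ioctl(dd, HCIDEVUP, 0) < 0)
      *		perror("hci0 up");	// e.g. EOPNOTSUPP when unconfigured
      */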
1551 
1552 /* This function requires the caller holds hdev->lock */
1553 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1554 {
1555         struct hci_conn_params *p;
1556 
1557         list_for_each_entry(p, &hdev->le_conn_params, list) {
1558                 if (p->conn) {
1559                         hci_conn_drop(p->conn);
1560                         hci_conn_put(p->conn);
1561                         p->conn = NULL;
1562                 }
1563                 list_del_init(&p->action);
1564         }
1565 
1566         BT_DBG("All LE pending actions cleared");
1567 }
1568 
1569 static int hci_dev_do_close(struct hci_dev *hdev)
1570 {
1571         BT_DBG("%s %p", hdev->name, hdev);
1572 
1573         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1574             test_bit(HCI_UP, &hdev->flags)) {
1575                 /* Execute vendor specific shutdown routine */
1576                 if (hdev->shutdown)
1577                         hdev->shutdown(hdev);
1578         }
1579 
1580         cancel_delayed_work(&hdev->power_off);
1581 
1582         hci_req_cancel(hdev, ENODEV);
1583         hci_req_lock(hdev);
1584 
1585         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1586                 cancel_delayed_work_sync(&hdev->cmd_timer);
1587                 hci_req_unlock(hdev);
1588                 return 0;
1589         }
1590 
1591         /* Flush RX and TX works */
1592         flush_work(&hdev->tx_work);
1593         flush_work(&hdev->rx_work);
1594 
1595         if (hdev->discov_timeout > 0) {
1596                 cancel_delayed_work(&hdev->discov_off);
1597                 hdev->discov_timeout = 0;
1598                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1599                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1600         }
1601 
1602         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1603                 cancel_delayed_work(&hdev->service_cache);
1604 
1605         cancel_delayed_work_sync(&hdev->le_scan_disable);
1606         cancel_delayed_work_sync(&hdev->le_scan_restart);
1607 
1608         if (hci_dev_test_flag(hdev, HCI_MGMT))
1609                 cancel_delayed_work_sync(&hdev->rpa_expired);
1610 
1611         /* Avoid potential lockdep warnings from the *_flush() calls by
1612          * ensuring the workqueue is empty up front.
1613          */
1614         drain_workqueue(hdev->workqueue);
1615 
1616         hci_dev_lock(hdev);
1617 
1618         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1619 
1620         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1621                 if (hdev->dev_type == HCI_BREDR)
1622                         mgmt_powered(hdev, 0);
1623         }
1624 
1625         hci_inquiry_cache_flush(hdev);
1626         hci_pend_le_actions_clear(hdev);
1627         hci_conn_hash_flush(hdev);
1628         hci_dev_unlock(hdev);
1629 
1630         smp_unregister(hdev);
1631 
1632         hci_notify(hdev, HCI_DEV_DOWN);
1633 
1634         if (hdev->flush)
1635                 hdev->flush(hdev);
1636 
1637         /* Reset device */
1638         skb_queue_purge(&hdev->cmd_q);
1639         atomic_set(&hdev->cmd_cnt, 1);
1640         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1641             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1642             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1643                 set_bit(HCI_INIT, &hdev->flags);
1644                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1645                 clear_bit(HCI_INIT, &hdev->flags);
1646         }
1647 
1648         /* Flush cmd work */
1649         flush_work(&hdev->cmd_work);
1650 
1651         /* Drop queues */
1652         skb_queue_purge(&hdev->rx_q);
1653         skb_queue_purge(&hdev->cmd_q);
1654         skb_queue_purge(&hdev->raw_q);
1655 
1656         /* Drop last sent command */
1657         if (hdev->sent_cmd) {
1658                 cancel_delayed_work_sync(&hdev->cmd_timer);
1659                 kfree_skb(hdev->sent_cmd);
1660                 hdev->sent_cmd = NULL;
1661         }
1662 
1663         /* After this point our queues are empty
1664          * and no tasks are scheduled. */
1665         hdev->close(hdev);
1666 
1667         /* Clear flags */
1668         hdev->flags &= BIT(HCI_RAW);
1669         hci_dev_clear_volatile_flags(hdev);
1670 
1671         /* Controller radio is available but is currently powered down */
1672         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1673 
1674         memset(hdev->eir, 0, sizeof(hdev->eir));
1675         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1676         bacpy(&hdev->random_addr, BDADDR_ANY);
1677 
1678         hci_req_unlock(hdev);
1679 
1680         hci_dev_put(hdev);
1681         return 0;
1682 }
1683 
1684 int hci_dev_close(__u16 dev)
1685 {
1686         struct hci_dev *hdev;
1687         int err;
1688 
1689         hdev = hci_dev_get(dev);
1690         if (!hdev)
1691                 return -ENODEV;
1692 
1693         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1694                 err = -EBUSY;
1695                 goto done;
1696         }
1697 
1698         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1699                 cancel_delayed_work(&hdev->power_off);
1700 
1701         err = hci_dev_do_close(hdev);
1702 
1703 done:
1704         hci_dev_put(hdev);
1705         return err;
1706 }
1707 
1708 static int hci_dev_do_reset(struct hci_dev *hdev)
1709 {
1710         int ret;
1711 
1712         BT_DBG("%s %p", hdev->name, hdev);
1713 
1714         hci_req_lock(hdev);
1715 
1716         /* Drop queues */
1717         skb_queue_purge(&hdev->rx_q);
1718         skb_queue_purge(&hdev->cmd_q);
1719 
1720         /* Avoid potential lockdep warnings from the *_flush() calls by
1721          * ensuring the workqueue is empty up front.
1722          */
1723         drain_workqueue(hdev->workqueue);
1724 
1725         hci_dev_lock(hdev);
1726         hci_inquiry_cache_flush(hdev);
1727         hci_conn_hash_flush(hdev);
1728         hci_dev_unlock(hdev);
1729 
1730         if (hdev->flush)
1731                 hdev->flush(hdev);
1732 
1733         atomic_set(&hdev->cmd_cnt, 1);
1734         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1735 
1736         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1737 
1738         hci_req_unlock(hdev);
1739         return ret;
1740 }
1741 
1742 int hci_dev_reset(__u16 dev)
1743 {
1744         struct hci_dev *hdev;
1745         int err;
1746 
1747         hdev = hci_dev_get(dev);
1748         if (!hdev)
1749                 return -ENODEV;
1750 
1751         if (!test_bit(HCI_UP, &hdev->flags)) {
1752                 err = -ENETDOWN;
1753                 goto done;
1754         }
1755 
1756         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1757                 err = -EBUSY;
1758                 goto done;
1759         }
1760 
1761         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1762                 err = -EOPNOTSUPP;
1763                 goto done;
1764         }
1765 
1766         err = hci_dev_do_reset(hdev);
1767 
1768 done:
1769         hci_dev_put(hdev);
1770         return err;
1771 }
1772 
1773 int hci_dev_reset_stat(__u16 dev)
1774 {
1775         struct hci_dev *hdev;
1776         int ret = 0;
1777 
1778         hdev = hci_dev_get(dev);
1779         if (!hdev)
1780                 return -ENODEV;
1781 
1782         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1783                 ret = -EBUSY;
1784                 goto done;
1785         }
1786 
1787         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1788                 ret = -EOPNOTSUPP;
1789                 goto done;
1790         }
1791 
1792         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1793 
1794 done:
1795         hci_dev_put(hdev);
1796         return ret;
1797 }
1798 
1799 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1800 {
1801         bool conn_changed, discov_changed;
1802 
1803         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1804 
1805         if ((scan & SCAN_PAGE))
1806                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1807                                                           HCI_CONNECTABLE);
1808         else
1809                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1810                                                            HCI_CONNECTABLE);
1811 
1812         if ((scan & SCAN_INQUIRY)) {
1813                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1814                                                             HCI_DISCOVERABLE);
1815         } else {
1816                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1817                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1818                                                              HCI_DISCOVERABLE);
1819         }
1820 
1821         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1822                 return;
1823 
1824         if (conn_changed || discov_changed) {
1825                 /* In case this was disabled through mgmt */
1826                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1827 
1828                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1829                         mgmt_update_adv_data(hdev);
1830 
1831                 mgmt_new_settings(hdev);
1832         }
1833 }
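
     /* For reference, a worked mapping of the legacy HCISETSCAN opt values
      * (SCAN_* constants from hci.h) to the flags updated above:
      *
      *	SCAN_DISABLED            (0x00) -> clears HCI_CONNECTABLE and
      *	                                   HCI_DISCOVERABLE
      *	SCAN_PAGE                (0x02) -> sets HCI_CONNECTABLE only
      *	SCAN_PAGE | SCAN_INQUIRY (0x03) -> sets HCI_CONNECTABLE and
      *	                                   HCI_DISCOVERABLE
      */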
1834 
1835 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1836 {
1837         struct hci_dev *hdev;
1838         struct hci_dev_req dr;
1839         int err = 0;
1840 
1841         if (copy_from_user(&dr, arg, sizeof(dr)))
1842                 return -EFAULT;
1843 
1844         hdev = hci_dev_get(dr.dev_id);
1845         if (!hdev)
1846                 return -ENODEV;
1847 
1848         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1849                 err = -EBUSY;
1850                 goto done;
1851         }
1852 
1853         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1854                 err = -EOPNOTSUPP;
1855                 goto done;
1856         }
1857 
1858         if (hdev->dev_type != HCI_BREDR) {
1859                 err = -EOPNOTSUPP;
1860                 goto done;
1861         }
1862 
1863         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1864                 err = -EOPNOTSUPP;
1865                 goto done;
1866         }
1867 
1868         switch (cmd) {
1869         case HCISETAUTH:
1870                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871                                    HCI_INIT_TIMEOUT);
1872                 break;
1873 
1874         case HCISETENCRYPT:
1875                 if (!lmp_encrypt_capable(hdev)) {
1876                         err = -EOPNOTSUPP;
1877                         break;
1878                 }
1879 
1880                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1881                         /* Auth must be enabled first */
1882                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1883                                            HCI_INIT_TIMEOUT);
1884                         if (err)
1885                                 break;
1886                 }
1887 
1888                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1889                                    HCI_INIT_TIMEOUT);
1890                 break;
1891 
1892         case HCISETSCAN:
1893                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1894                                    HCI_INIT_TIMEOUT);
1895 
1896                 /* Ensure that the connectable and discoverable states
1897                  * get correctly modified as this was a non-mgmt change.
1898                  */
1899                 if (!err)
1900                         hci_update_scan_state(hdev, dr.dev_opt);
1901                 break;
1902 
1903         case HCISETLINKPOL:
1904                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1905                                    HCI_INIT_TIMEOUT);
1906                 break;
1907 
1908         case HCISETLINKMODE:
1909                 hdev->link_mode = ((__u16) dr.dev_opt) &
1910                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1911                 break;
1912 
1913         case HCISETPTYPE:
1914                 hdev->pkt_type = (__u16) dr.dev_opt;
1915                 break;
1916 
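             /* The two MTU ioctls below pack two 16-bit values into the
              * 32-bit dev_opt by memory order: the first half-word is the
              * packet count, the second the MTU. A hedged userspace sketch
              * (on a little-endian host this equals pkts | (mtu << 16)):
              *
              *	struct hci_dev_req dr = { .dev_id = 0 };
              *	uint16_t *opt = (uint16_t *)&dr.dev_opt;
              *
              *	opt[0] = 8;		// ACL packet count
              *	opt[1] = 1021;		// ACL MTU
              *	ioctl(dd, HCISETACLMTU, &dr);
              */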
1917         case HCISETACLMTU:
1918                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1919                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1920                 break;
1921 
1922         case HCISETSCOMTU:
1923                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1924                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1925                 break;
1926 
1927         default:
1928                 err = -EINVAL;
1929                 break;
1930         }
1931 
1932 done:
1933         hci_dev_put(hdev);
1934         return err;
1935 }
1936 
1937 int hci_get_dev_list(void __user *arg)
1938 {
1939         struct hci_dev *hdev;
1940         struct hci_dev_list_req *dl;
1941         struct hci_dev_req *dr;
1942         int n = 0, size, err;
1943         __u16 dev_num;
1944 
1945         if (get_user(dev_num, (__u16 __user *) arg))
1946                 return -EFAULT;
1947 
1948         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1949                 return -EINVAL;
1950 
1951         size = sizeof(*dl) + dev_num * sizeof(*dr);
1952 
1953         dl = kzalloc(size, GFP_KERNEL);
1954         if (!dl)
1955                 return -ENOMEM;
1956 
1957         dr = dl->dev_req;
1958 
1959         read_lock(&hci_dev_list_lock);
1960         list_for_each_entry(hdev, &hci_dev_list, list) {
1961                 unsigned long flags = hdev->flags;
1962 
1963                 /* When auto-off is configured, the transport is running,
1964                  * but in that case the device should still be reported
1965                  * as down.
1966                  */
1967                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1968                         flags &= ~BIT(HCI_UP);
1969 
1970                 (dr + n)->dev_id  = hdev->id;
1971                 (dr + n)->dev_opt = flags;
1972 
1973                 if (++n >= dev_num)
1974                         break;
1975         }
1976         read_unlock(&hci_dev_list_lock);
1977 
1978         dl->dev_num = n;
1979         size = sizeof(*dl) + n * sizeof(*dr);
1980 
1981         err = copy_to_user(arg, dl, size);
1982         kfree(dl);
1983 
1984         return err ? -EFAULT : 0;
1985 }
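
     /* A minimal sketch of the userspace side of this ioctl (illustrative,
      * not part of this file; error handling omitted):
      *
      *	struct hci_dev_list_req *dl;
      *	struct hci_dev_req *dr;
      *	int i;
      *
      *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
      *	dl->dev_num = HCI_MAX_DEV;
      *	dr = dl->dev_req;
      *
      *	if (ioctl(dd, HCIGETDEVLIST, dl) == 0)
      *		for (i = 0; i < dl->dev_num; i++)
      *			printf("hci%u flags 0x%x\n", dr[i].dev_id,
      *			       dr[i].dev_opt);
      *	free(dl);
      */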
1986 
1987 int hci_get_dev_info(void __user *arg)
1988 {
1989         struct hci_dev *hdev;
1990         struct hci_dev_info di;
1991         unsigned long flags;
1992         int err = 0;
1993 
1994         if (copy_from_user(&di, arg, sizeof(di)))
1995                 return -EFAULT;
1996 
1997         hdev = hci_dev_get(di.dev_id);
1998         if (!hdev)
1999                 return -ENODEV;
2000 
2001         /* When auto-off is configured, the transport is running,
2002          * but in that case the device should still be reported
2003          * as down.
2004          */
2005         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2006                 flags = hdev->flags & ~BIT(HCI_UP);
2007         else
2008                 flags = hdev->flags;
2009 
2010         strcpy(di.name, hdev->name);
2011         di.bdaddr   = hdev->bdaddr;
2012         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2013         di.flags    = flags;
2014         di.pkt_type = hdev->pkt_type;
2015         if (lmp_bredr_capable(hdev)) {
2016                 di.acl_mtu  = hdev->acl_mtu;
2017                 di.acl_pkts = hdev->acl_pkts;
2018                 di.sco_mtu  = hdev->sco_mtu;
2019                 di.sco_pkts = hdev->sco_pkts;
2020         } else {
2021                 di.acl_mtu  = hdev->le_mtu;
2022                 di.acl_pkts = hdev->le_pkts;
2023                 di.sco_mtu  = 0;
2024                 di.sco_pkts = 0;
2025         }
2026         di.link_policy = hdev->link_policy;
2027         di.link_mode   = hdev->link_mode;
2028 
2029         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2030         memcpy(&di.features, &hdev->features, sizeof(di.features));
2031 
2032         if (copy_to_user(arg, &di, sizeof(di)))
2033                 err = -EFAULT;
2034 
2035         hci_dev_put(hdev);
2036 
2037         return err;
2038 }
2039 
2040 /* ---- Interface to HCI drivers ---- */
2041 
2042 static int hci_rfkill_set_block(void *data, bool blocked)
2043 {
2044         struct hci_dev *hdev = data;
2045 
2046         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2047 
2048         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2049                 return -EBUSY;
2050 
2051         if (blocked) {
2052                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2053                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2054                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2055                         hci_dev_do_close(hdev);
2056         } else {
2057                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2058         }
2059 
2060         return 0;
2061 }
2062 
2063 static const struct rfkill_ops hci_rfkill_ops = {
2064         .set_block = hci_rfkill_set_block,
2065 };
2066 
2067 static void hci_power_on(struct work_struct *work)
2068 {
2069         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2070         int err;
2071 
2072         BT_DBG("%s", hdev->name);
2073 
2074         err = hci_dev_do_open(hdev);
2075         if (err < 0) {
2076                 hci_dev_lock(hdev);
2077                 mgmt_set_powered_failed(hdev, err);
2078                 hci_dev_unlock(hdev);
2079                 return;
2080         }
2081 
2082         /* During the HCI setup phase, a few error conditions are
2083          * ignored and they need to be checked now. If they are still
2084          * valid, it is important to turn the device back off.
2085          */
2086         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2087             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2088             (hdev->dev_type == HCI_BREDR &&
2089              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2090              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2091                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2092                 hci_dev_do_close(hdev);
2093         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2094                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2095                                    HCI_AUTO_OFF_TIMEOUT);
2096         }
2097 
2098         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2099                 /* For unconfigured devices, set the HCI_RAW flag
2100                  * so that userspace can easily identify them.
2101                  */
2102                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2103                         set_bit(HCI_RAW, &hdev->flags);
2104 
2105                 /* For fully configured devices, this will send
2106                  * the Index Added event. For unconfigured devices,
2107                  * it will send the Unconfigured Index Added event.
2108                  *
2109                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2110                  * and no event will be sent.
2111                  */
2112                 mgmt_index_added(hdev);
2113         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2114                 /* Now that the controller is configured, it is
2115                  * important to clear the HCI_RAW flag.
2116                  */
2117                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2118                         clear_bit(HCI_RAW, &hdev->flags);
2119 
2120                 /* Powering on the controller with HCI_CONFIG set only
2121                  * happens with the transition from unconfigured to
2122                  * configured. This will send the Index Added event.
2123                  */
2124                 mgmt_index_added(hdev);
2125         }
2126 }
2127 
2128 static void hci_power_off(struct work_struct *work)
2129 {
2130         struct hci_dev *hdev = container_of(work, struct hci_dev,
2131                                             power_off.work);
2132 
2133         BT_DBG("%s", hdev->name);
2134 
2135         hci_dev_do_close(hdev);
2136 }
2137 
2138 static void hci_error_reset(struct work_struct *work)
2139 {
2140         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2141 
2142         BT_DBG("%s", hdev->name);
2143 
2144         if (hdev->hw_error)
2145                 hdev->hw_error(hdev, hdev->hw_error_code);
2146         else
2147                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2148                        hdev->hw_error_code);
2149 
2150         if (hci_dev_do_close(hdev))
2151                 return;
2152 
2153         hci_dev_do_open(hdev);
2154 }
2155 
2156 static void hci_discov_off(struct work_struct *work)
2157 {
2158         struct hci_dev *hdev;
2159 
2160         hdev = container_of(work, struct hci_dev, discov_off.work);
2161 
2162         BT_DBG("%s", hdev->name);
2163 
2164         mgmt_discoverable_timeout(hdev);
2165 }
2166 
2167 void hci_uuids_clear(struct hci_dev *hdev)
2168 {
2169         struct bt_uuid *uuid, *tmp;
2170 
2171         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2172                 list_del(&uuid->list);
2173                 kfree(uuid);
2174         }
2175 }
2176 
2177 void hci_link_keys_clear(struct hci_dev *hdev)
2178 {
2179         struct link_key *key;
2180 
2181         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2182                 list_del_rcu(&key->list);
2183                 kfree_rcu(key, rcu);
2184         }
2185 }
2186 
2187 void hci_smp_ltks_clear(struct hci_dev *hdev)
2188 {
2189         struct smp_ltk *k;
2190 
2191         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2192                 list_del_rcu(&k->list);
2193                 kfree_rcu(k, rcu);
2194         }
2195 }
2196 
2197 void hci_smp_irks_clear(struct hci_dev *hdev)
2198 {
2199         struct smp_irk *k;
2200 
2201         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2202                 list_del_rcu(&k->list);
2203                 kfree_rcu(k, rcu);
2204         }
2205 }
2206 
2207 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2208 {
2209         struct link_key *k;
2210 
2211         rcu_read_lock();
2212         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2213                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2214                         rcu_read_unlock();
2215                         return k;
2216                 }
2217         }
2218         rcu_read_unlock();
2219 
2220         return NULL;
2221 }
2222 
2223 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2224                                u8 key_type, u8 old_key_type)
2225 {
2226         /* Legacy key */
2227         if (key_type < 0x03)
2228                 return true;
2229 
2230         /* Debug keys are insecure so don't store them persistently */
2231         if (key_type == HCI_LK_DEBUG_COMBINATION)
2232                 return false;
2233 
2234         /* Changed combination key and there's no previous one */
2235         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2236                 return false;
2237 
2238         /* Security mode 3 case */
2239         if (!conn)
2240                 return true;
2241 
2242         /* BR/EDR key derived using SC from an LE link */
2243         if (conn->type == LE_LINK)
2244                 return true;
2245 
2246         /* Neither local nor remote side had no-bonding as a requirement */
2247         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2248                 return true;
2249 
2250         /* Local side had dedicated bonding as requirement */
2251         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2252                 return true;
2253 
2254         /* Remote side had dedicated bonding as requirement */
2255         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2256                 return true;
2257 
2258         /* If none of the above criteria match, then don't store the key
2259          * persistently */
2260         return false;
2261 }
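
     /* Worked example, assuming the auth requirement encoding from the
      * Bluetooth Core Specification (0x00/0x01 no bonding, 0x02/0x03
      * dedicated bonding, 0x04/0x05 general bonding, MITM in the odd
      * values): if both sides requested General Bonding with MITM
      * (auth_type == remote_auth == 0x05), the "> 0x01" check above makes
      * an authenticated combination key persistent; if the remote asked
      * for No Bonding (0x00) and the local side did not require dedicated
      * bonding, none of the checks match and the key is session-only.
      */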
2262 
2263 static u8 ltk_role(u8 type)
2264 {
2265         if (type == SMP_LTK)
2266                 return HCI_ROLE_MASTER;
2267 
2268         return HCI_ROLE_SLAVE;
2269 }
2270 
2271 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2272                              u8 addr_type, u8 role)
2273 {
2274         struct smp_ltk *k;
2275 
2276         rcu_read_lock();
2277         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2278                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2279                         continue;
2280 
2281                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2282                         rcu_read_unlock();
2283                         return k;
2284                 }
2285         }
2286         rcu_read_unlock();
2287 
2288         return NULL;
2289 }
2290 
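     /* Resolve a Resolvable Private Address against the stored IRKs. The
      * first pass is a cheap comparison with the RPA cached in each entry;
      * only when that fails is the cryptographic check performed via
      * smp_irk_matches(), and a successful match refreshes the cached RPA.
      */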
2291 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2292 {
2293         struct smp_irk *irk;
2294 
2295         rcu_read_lock();
2296         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2297                 if (!bacmp(&irk->rpa, rpa)) {
2298                         rcu_read_unlock();
2299                         return irk;
2300                 }
2301         }
2302 
2303         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2304                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2305                         bacpy(&irk->rpa, rpa);
2306                         rcu_read_unlock();
2307                         return irk;
2308                 }
2309         }
2310         rcu_read_unlock();
2311 
2312         return NULL;
2313 }
2314 
2315 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2316                                      u8 addr_type)
2317 {
2318         struct smp_irk *irk;
2319 
2320         /* Identity Address must be public or static random (top two bits of b[5], the MSB, set) */
2321         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2322                 return NULL;
2323 
2324         rcu_read_lock();
2325         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2326                 if (addr_type == irk->addr_type &&
2327                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2328                         rcu_read_unlock();
2329                         return irk;
2330                 }
2331         }
2332         rcu_read_unlock();
2333 
2334         return NULL;
2335 }
2336 
2337 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2338                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2339                                   u8 pin_len, bool *persistent)
2340 {
2341         struct link_key *key, *old_key;
2342         u8 old_key_type;
2343 
2344         old_key = hci_find_link_key(hdev, bdaddr);
2345         if (old_key) {
2346                 old_key_type = old_key->type;
2347                 key = old_key;
2348         } else {
2349                 old_key_type = conn ? conn->key_type : 0xff;
2350                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2351                 if (!key)
2352                         return NULL;
2353                 list_add_rcu(&key->list, &hdev->link_keys);
2354         }
2355 
2356         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2357 
2358         /* Some buggy controller combinations generate a changed
2359          * combination key for legacy pairing even when there's no
2360          * previous key */
2361         if (type == HCI_LK_CHANGED_COMBINATION &&
2362             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2363                 type = HCI_LK_COMBINATION;
2364                 if (conn)
2365                         conn->key_type = type;
2366         }
2367 
2368         bacpy(&key->bdaddr, bdaddr);
2369         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2370         key->pin_len = pin_len;
2371 
2372         if (type == HCI_LK_CHANGED_COMBINATION)
2373                 key->type = old_key_type;
2374         else
2375                 key->type = type;
2376 
2377         if (persistent)
2378                 *persistent = hci_persistent_key(hdev, conn, type,
2379                                                  old_key_type);
2380 
2381         return key;
2382 }
2383 
2384 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2385                             u8 addr_type, u8 type, u8 authenticated,
2386                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2387 {
2388         struct smp_ltk *key, *old_key;
2389         u8 role = ltk_role(type);
2390 
2391         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2392         if (old_key)
2393                 key = old_key;
2394         else {
2395                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2396                 if (!key)
2397                         return NULL;
2398                 list_add_rcu(&key->list, &hdev->long_term_keys);
2399         }
2400 
2401         bacpy(&key->bdaddr, bdaddr);
2402         key->bdaddr_type = addr_type;
2403         memcpy(key->val, tk, sizeof(key->val));
2404         key->authenticated = authenticated;
2405         key->ediv = ediv;
2406         key->rand = rand;
2407         key->enc_size = enc_size;
2408         key->type = type;
2409 
2410         return key;
2411 }
2412 
2413 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2414                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2415 {
2416         struct smp_irk *irk;
2417 
2418         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2419         if (!irk) {
2420                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2421                 if (!irk)
2422                         return NULL;
2423 
2424                 bacpy(&irk->bdaddr, bdaddr);
2425                 irk->addr_type = addr_type;
2426 
2427                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2428         }
2429 
2430         memcpy(irk->val, val, 16);
2431         bacpy(&irk->rpa, rpa);
2432 
2433         return irk;
2434 }
2435 
2436 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2437 {
2438         struct link_key *key;
2439 
2440         key = hci_find_link_key(hdev, bdaddr);
2441         if (!key)
2442                 return -ENOENT;
2443 
2444         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2445 
2446         list_del_rcu(&key->list);
2447         kfree_rcu(key, rcu);
2448 
2449         return 0;
2450 }
2451 
2452 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2453 {
2454         struct smp_ltk *k;
2455         int removed = 0;
2456 
2457         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2458                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2459                         continue;
2460 
2461                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462 
2463                 list_del_rcu(&k->list);
2464                 kfree_rcu(k, rcu);
2465                 removed++;
2466         }
2467 
2468         return removed ? 0 : -ENOENT;
2469 }
2470 
2471 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2472 {
2473         struct smp_irk *k;
2474 
2475         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2476                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2477                         continue;
2478 
2479                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2480 
2481                 list_del_rcu(&k->list);
2482                 kfree_rcu(k, rcu);
2483         }
2484 }
2485 
2486 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2487 {
2488         struct smp_ltk *k;
2489         struct smp_irk *irk;
2490         u8 addr_type;
2491 
2492         if (type == BDADDR_BREDR) {
2493                 if (hci_find_link_key(hdev, bdaddr))
2494                         return true;
2495                 return false;
2496         }
2497 
2498         /* Convert to HCI addr type which struct smp_ltk uses */
2499         if (type == BDADDR_LE_PUBLIC)
2500                 addr_type = ADDR_LE_DEV_PUBLIC;
2501         else
2502                 addr_type = ADDR_LE_DEV_RANDOM;
2503 
2504         irk = hci_get_irk(hdev, bdaddr, addr_type);
2505         if (irk) {
2506                 bdaddr = &irk->bdaddr;
2507                 addr_type = irk->addr_type;
2508         }
2509 
2510         rcu_read_lock();
2511         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2512                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2513                         rcu_read_unlock();
2514                         return true;
2515                 }
2516         }
2517         rcu_read_unlock();
2518 
2519         return false;
2520 }
2521 
2522 /* HCI command timer function */
2523 static void hci_cmd_timeout(struct work_struct *work)
2524 {
2525         struct hci_dev *hdev = container_of(work, struct hci_dev,
2526                                             cmd_timer.work);
2527 
2528         if (hdev->sent_cmd) {
2529                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2530                 u16 opcode = __le16_to_cpu(sent->opcode);
2531 
2532                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2533         } else {
2534                 BT_ERR("%s command tx timeout", hdev->name);
2535         }
2536 
2537         atomic_set(&hdev->cmd_cnt, 1);
2538         queue_work(hdev->workqueue, &hdev->cmd_work);
2539 }
2540 
2541 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2542                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2543 {
2544         struct oob_data *data;
2545 
2546         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2547                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2548                         continue;
2549                 if (data->bdaddr_type != bdaddr_type)
2550                         continue;
2551                 return data;
2552         }
2553 
2554         return NULL;
2555 }
2556 
2557 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2558                                u8 bdaddr_type)
2559 {
2560         struct oob_data *data;
2561 
2562         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2563         if (!data)
2564                 return -ENOENT;
2565 
2566         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2567 
2568         list_del(&data->list);
2569         kfree(data);
2570 
2571         return 0;
2572 }
2573 
2574 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2575 {
2576         struct oob_data *data, *n;
2577 
2578         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2579                 list_del(&data->list);
2580                 kfree(data);
2581         }
2582 }
2583 
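     /* Note on the bitmask computed below: data->present ends up as 0x01
      * when only the P-192 values (hash192/rand192) were supplied, 0x02
      * for P-256 only (hash256/rand256), 0x03 for both and 0x00 for
      * neither.
      */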
2584 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2585                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2586                             u8 *hash256, u8 *rand256)
2587 {
2588         struct oob_data *data;
2589 
2590         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2591         if (!data) {
2592                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2593                 if (!data)
2594                         return -ENOMEM;
2595 
2596                 bacpy(&data->bdaddr, bdaddr);
2597                 data->bdaddr_type = bdaddr_type;
2598                 list_add(&data->list, &hdev->remote_oob_data);
2599         }
2600 
2601         if (hash192 && rand192) {
2602                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2603                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2604                 if (hash256 && rand256)
2605                         data->present = 0x03;
2606         } else {
2607                 memset(data->hash192, 0, sizeof(data->hash192));
2608                 memset(data->rand192, 0, sizeof(data->rand192));
2609                 if (hash256 && rand256)
2610                         data->present = 0x02;
2611                 else
2612                         data->present = 0x00;
2613         }
2614 
2615         if (hash256 && rand256) {
2616                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2617                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2618         } else {
2619                 memset(data->hash256, 0, sizeof(data->hash256));
2620                 memset(data->rand256, 0, sizeof(data->rand256));
2621                 if (hash192 && rand192)
2622                         data->present = 0x01;
2623         }
2624 
2625         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2626 
2627         return 0;
2628 }
2629 
2630 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2631                                          bdaddr_t *bdaddr, u8 type)
2632 {
2633         struct bdaddr_list *b;
2634 
2635         list_for_each_entry(b, bdaddr_list, list) {
2636                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2637                         return b;
2638         }
2639 
2640         return NULL;
2641 }
2642 
2643 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2644 {
2645         struct list_head *p, *n;
2646 
2647         list_for_each_safe(p, n, bdaddr_list) {
2648                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2649 
2650                 list_del(p);
2651                 kfree(b);
2652         }
2653 }
2654 
2655 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2656 {
2657         struct bdaddr_list *entry;
2658 
2659         if (!bacmp(bdaddr, BDADDR_ANY))
2660                 return -EBADF;
2661 
2662         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2663                 return -EEXIST;
2664 
2665         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2666         if (!entry)
2667                 return -ENOMEM;
2668 
2669         bacpy(&entry->bdaddr, bdaddr);
2670         entry->bdaddr_type = type;
2671 
2672         list_add(&entry->list, list);
2673 
2674         return 0;
2675 }
2676 
2677 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2678 {
2679         struct bdaddr_list *entry;
2680 
2681         if (!bacmp(bdaddr, BDADDR_ANY)) {
2682                 hci_bdaddr_list_clear(list);
2683                 return 0;
2684         }
2685 
2686         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2687         if (!entry)
2688                 return -ENOENT;
2689 
2690         list_del(&entry->list);
2691         kfree(entry);
2692 
2693         return 0;
2694 }
2695 
2696 /* This function requires the caller holds hdev->lock */
2697 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2698                                                bdaddr_t *addr, u8 addr_type)
2699 {
2700         struct hci_conn_params *params;
2701 
2702         /* The conn params list only contains identity addresses */
2703         if (!hci_is_identity_address(addr, addr_type))
2704                 return NULL;
2705 
2706         list_for_each_entry(params, &hdev->le_conn_params, list) {
2707                 if (bacmp(&params->addr, addr) == 0 &&
2708                     params->addr_type == addr_type) {
2709                         return params;
2710                 }
2711         }
2712 
2713         return NULL;
2714 }
2715 
2716 /* This function requires the caller holds hdev->lock */
2717 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2718                                                   bdaddr_t *addr, u8 addr_type)
2719 {
2720         struct hci_conn_params *param;
2721 
2722         /* The list only contains identity addresses */
2723         if (!hci_is_identity_address(addr, addr_type))
2724                 return NULL;
2725 
2726         list_for_each_entry(param, list, action) {
2727                 if (bacmp(&param->addr, addr) == 0 &&
2728                     param->addr_type == addr_type)
2729                         return param;
2730         }
2731 
2732         return NULL;
2733 }
2734 
2735 /* This function requires the caller holds hdev->lock */
2736 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2737                                             bdaddr_t *addr, u8 addr_type)
2738 {
2739         struct hci_conn_params *params;
2740 
2741         if (!hci_is_identity_address(addr, addr_type))
2742                 return NULL;
2743 
2744         params = hci_conn_params_lookup(hdev, addr, addr_type);
2745         if (params)
2746                 return params;
2747 
2748         params = kzalloc(sizeof(*params), GFP_KERNEL);
2749         if (!params) {
2750                 BT_ERR("Out of memory");
2751                 return NULL;
2752         }
2753 
2754         bacpy(&params->addr, addr);
2755         params->addr_type = addr_type;
2756 
2757         list_add(&params->list, &hdev->le_conn_params);
2758         INIT_LIST_HEAD(&params->action);
2759 
2760         params->conn_min_interval = hdev->le_conn_min_interval;
2761         params->conn_max_interval = hdev->le_conn_max_interval;
2762         params->conn_latency = hdev->le_conn_latency;
2763         params->supervision_timeout = hdev->le_supv_timeout;
2764         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2765 
2766         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2767 
2768         return params;
2769 }
2770 
2771 static void hci_conn_params_free(struct hci_conn_params *params)
2772 {
2773         if (params->conn) {
2774                 hci_conn_drop(params->conn);
2775                 hci_conn_put(params->conn);
2776         }
2777 
2778         list_del(&params->action);
2779         list_del(&params->list);
2780         kfree(params);
2781 }
2782 
2783 /* This function requires the caller holds hdev->lock */
2784 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2785 {
2786         struct hci_conn_params *params;
2787 
2788         params = hci_conn_params_lookup(hdev, addr, addr_type);
2789         if (!params)
2790                 return;
2791 
2792         hci_conn_params_free(params);
2793 
2794         hci_update_background_scan(hdev);
2795 
2796         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2797 }
2798 
2799 /* This function requires the caller holds hdev->lock */
2800 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2801 {
2802         struct hci_conn_params *params, *tmp;
2803 
2804         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2805                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2806                         continue;
2807                 list_del(&params->list);
2808                 kfree(params);
2809         }
2810 
2811         BT_DBG("All disabled LE connection parameters were removed");
2812 }
2813 
2814 /* This function requires the caller holds hdev->lock */
2815 void hci_conn_params_clear_all(struct hci_dev *hdev)
2816 {
2817         struct hci_conn_params *params, *tmp;
2818 
2819         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2820                 hci_conn_params_free(params);
2821 
2822         hci_update_background_scan(hdev);
2823 
2824         BT_DBG("All LE connection parameters were removed");
2825 }
2826 
2827 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2828 {
2829         if (status) {
2830                 BT_ERR("Failed to start inquiry: status %d", status);
2831 
2832                 hci_dev_lock(hdev);
2833                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2834                 hci_dev_unlock(hdev);
2835                 return;
2836         }
2837 }
2838 
2839 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2840                                           u16 opcode)
2841 {
2842         /* General inquiry access code (GIAC) */
2843         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2844         struct hci_cp_inquiry cp;
2845         int err;
2846 
2847         if (status) {
2848                 BT_ERR("Failed to disable LE scanning: status %d", status);
2849                 return;
2850         }
2851 
2852         hdev->discovery.scan_start = 0;
2853 
2854         switch (hdev->discovery.type) {
2855         case DISCOV_TYPE_LE:
2856                 hci_dev_lock(hdev);
2857                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2858                 hci_dev_unlock(hdev);
2859                 break;
2860 
2861         case DISCOV_TYPE_INTERLEAVED:
2862                 hci_dev_lock(hdev);
2863 
2864                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2865                              &hdev->quirks)) {
2866                         /* If we were running an LE-only scan, change the
2867                          * discovery state. If we were running LE and BR/EDR
2868                          * inquiry simultaneously and the BR/EDR inquiry has
2869                          * finished, stop discovery; otherwise it will stop
2870                          * discovery when it ends. If we are resolving a
2871                          * remote device name, do not change discovery state.
2872                          */
2873                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2874                             hdev->discovery.state != DISCOVERY_RESOLVING)
2875                                 hci_discovery_set_state(hdev,
2876                                                         DISCOVERY_STOPPED);
2877                 } else {
2878                         struct hci_request req;
2879 
2880                         hci_inquiry_cache_flush(hdev);
2881 
2882                         hci_req_init(&req, hdev);
2883 
2884                         memset(&cp, 0, sizeof(cp));
2885                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2886                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2887                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2888 
2889                         err = hci_req_run(&req, inquiry_complete);
2890                         if (err) {
2891                                 BT_ERR("Inquiry request failed: err %d", err);
2892                                 hci_discovery_set_state(hdev,
2893                                                         DISCOVERY_STOPPED);
2894                         }
2895                 }
2896 
2897                 hci_dev_unlock(hdev);
2898                 break;
2899         }
2900 }
2901 
2902 static void le_scan_disable_work(struct work_struct *work)
2903 {
2904         struct hci_dev *hdev = container_of(work, struct hci_dev,
2905                                             le_scan_disable.work);
2906         struct hci_request req;
2907         int err;
2908 
2909         BT_DBG("%s", hdev->name);
2910 
2911         cancel_delayed_work_sync(&hdev->le_scan_restart);
2912 
2913         hci_req_init(&req, hdev);
2914 
2915         hci_req_add_le_scan_disable(&req);
2916 
2917         err = hci_req_run(&req, le_scan_disable_work_complete);
2918         if (err)
2919                 BT_ERR("Disable LE scanning request failed: err %d", err);
2920 }
2921 
2922 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2923                                           u16 opcode)
2924 {
2925         unsigned long timeout, duration, scan_start, now;
2926 
2927         BT_DBG("%s", hdev->name);
2928 
2929         if (status) {
2930                 BT_ERR("Failed to restart LE scan: status %d", status);
2931                 return;
2932         }
2933 
2934         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2935             !hdev->discovery.scan_start)
2936                 return;
2937 
2938         /* When the scan was started, hdev->le_scan_disable was queued to
2939          * run 'duration' after scan_start. During the scan restart this
2940          * job was canceled, and it now needs to be queued again with the
2941          * proper remaining timeout so that the scan does not run forever.
2942          */
2943         duration = hdev->discovery.scan_duration;
2944         scan_start = hdev->discovery.scan_start;
2945         now = jiffies;
2946         if (now - scan_start <= duration) {
2947                 int elapsed;
2948 
2949                 if (now >= scan_start)
2950                         elapsed = now - scan_start;
2951                 else
2952                         elapsed = ULONG_MAX - scan_start + now;
2953 
2954                 timeout = duration - elapsed;
2955         } else {
2956                 timeout = 0;
2957         }
2958         queue_delayed_work(hdev->workqueue,
2959                            &hdev->le_scan_disable, timeout);
2960 }
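
     /* Worked example for the wraparound branch above: with scan_start just
      * below the wrap point, say scan_start == ULONG_MAX - 99 and now == 100,
      * the code computes elapsed = ULONG_MAX - scan_start + now = 199 jiffies
      * and re-queues the disable work after duration - 199 ticks (values
      * illustrative; duration comes from hdev->discovery.scan_duration).
      */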
2961 
2962 static void le_scan_restart_work(struct work_struct *work)
2963 {
2964         struct hci_dev *hdev = container_of(work, struct hci_dev,
2965                                             le_scan_restart.work);
2966         struct hci_request req;
2967         struct hci_cp_le_set_scan_enable cp;
2968         int err;
2969 
2970         BT_DBG("%s", hdev->name);
2971 
2972         /* If controller is not scanning we are done. */
2973         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2974                 return;
2975 
2976         hci_req_init(&req, hdev);
2977 
2978         hci_req_add_le_scan_disable(&req);
2979 
2980         memset(&cp, 0, sizeof(cp));
2981         cp.enable = LE_SCAN_ENABLE;
2982         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2983         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2984 
2985         err = hci_req_run(&req, le_scan_restart_work_complete);
2986         if (err)
2987                 BT_ERR("Restart LE scan request failed: err %d", err);
2988 }
2989 
2990 /* Copy the Identity Address of the controller.
2991  *
2992  * If the controller has a public BD_ADDR, then by default use that one.
2993  * If this is an LE-only controller without a public address, default to
2994  * the static random address.
2995  *
2996  * For debugging purposes it is possible to force controllers with a
2997  * public address to use the static random address instead.
2998  *
2999  * In case BR/EDR has been disabled on a dual-mode controller and
3000  * userspace has configured a static address, then that address
3001  * becomes the identity address instead of the public BR/EDR address.
3002  */
3003 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3004                                u8 *bdaddr_type)
3005 {
3006         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3007             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3008             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3009              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3010                 bacpy(bdaddr, &hdev->static_addr);
3011                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3012         } else {
3013                 bacpy(bdaddr, &hdev->bdaddr);
3014                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3015         }
3016 }
3017 
3018 /* Alloc HCI device */
3019 struct hci_dev *hci_alloc_dev(void)
3020 {
3021         struct hci_dev *hdev;
3022 
3023         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3024         if (!hdev)
3025                 return NULL;
3026 
3027         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3028         hdev->esco_type = (ESCO_HV1);
3029         hdev->link_mode = (HCI_LM_ACCEPT);
3030         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3031         hdev->io_capability = 0x03;     /* No Input No Output */
3032         hdev->manufacturer = 0xffff;    /* Default to internal use */
3033         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3034         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3035 
3036         hdev->sniff_max_interval = 800;
3037         hdev->sniff_min_interval = 80;
3038 
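     /* The LE defaults below use the Bluetooth Core Specification encodings:
      * scan and advertising values are in 0.625 ms units (0x0060 = 60 ms
      * interval, 0x0030 = 30 ms window), connection intervals in 1.25 ms
      * units (0x0028/0x0038 = 50/70 ms) and the supervision timeout in
      * 10 ms units (0x002a = 420 ms).
      */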
3039         hdev->le_adv_channel_map = 0x07;
3040         hdev->le_adv_min_interval = 0x0800;
3041         hdev->le_adv_max_interval = 0x0800;
3042         hdev->le_scan_interval = 0x0060;
3043         hdev->le_scan_window = 0x0030;
3044         hdev->le_conn_min_interval = 0x0028;
3045         hdev->le_conn_max_interval = 0x0038;
3046         hdev->le_conn_latency = 0x0000;
3047         hdev->le_supv_timeout = 0x002a;
3048         hdev->le_def_tx_len = 0x001b;
3049         hdev->le_def_tx_time = 0x0148;
3050         hdev->le_max_tx_len = 0x001b;
3051         hdev->le_max_tx_time = 0x0148;
3052         hdev->le_max_rx_len = 0x001b;
3053         hdev->le_max_rx_time = 0x0148;
3054 
3055         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3056         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3057         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3058         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3059 
3060         mutex_init(&hdev->lock);
3061         mutex_init(&hdev->req_lock);
3062 
3063         INIT_LIST_HEAD(&hdev->mgmt_pending);
3064         INIT_LIST_HEAD(&hdev->blacklist);
3065         INIT_LIST_HEAD(&hdev->whitelist);
3066         INIT_LIST_HEAD(&hdev->uuids);
3067         INIT_LIST_HEAD(&hdev->link_keys);
3068         INIT_LIST_HEAD(&hdev->long_term_keys);
3069         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3070         INIT_LIST_HEAD(&hdev->remote_oob_data);
3071         INIT_LIST_HEAD(&hdev->le_white_list);
3072         INIT_LIST_HEAD(&hdev->le_conn_params);
3073         INIT_LIST_HEAD(&hdev->pend_le_conns);
3074         INIT_LIST_HEAD(&hdev->pend_le_reports);
3075         INIT_LIST_HEAD(&hdev->conn_hash.list);
3076 
3077         INIT_WORK(&hdev->rx_work, hci_rx_work);
3078         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3079         INIT_WORK(&hdev->tx_work, hci_tx_work);
3080         INIT_WORK(&hdev->power_on, hci_power_on);
3081         INIT_WORK(&hdev->error_reset, hci_error_reset);
3082 
3083         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3084         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3085         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3086         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3087 
3088         skb_queue_head_init(&hdev->rx_q);
3089         skb_queue_head_init(&hdev->cmd_q);
3090         skb_queue_head_init(&hdev->raw_q);
3091 
3092         init_waitqueue_head(&hdev->req_wait_q);
3093 
3094         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3095 
3096         hci_init_sysfs(hdev);
3097         discovery_init(hdev);
3098         adv_info_init(hdev);
3099 
3100         return hdev;
3101 }
3102 EXPORT_SYMBOL(hci_alloc_dev);
3103 
3104 /* Free HCI device */
3105 void hci_free_dev(struct hci_dev *hdev)
3106 {
3107         /* will free via device release */
3108         put_device(&hdev->dev);
3109 }
3110 EXPORT_SYMBOL(hci_free_dev);
3111 
3112 /* Register HCI device */
3113 int hci_register_dev(struct hci_dev *hdev)
3114 {
3115         int id, error;
3116 
3117         if (!hdev->open || !hdev->close || !hdev->send)
3118                 return -EINVAL;
3119 
3120         /* Do not allow HCI_AMP devices to register at index 0,
3121          * so the index can be used as the AMP controller ID.
3122          */
3123         switch (hdev->dev_type) {
3124         case HCI_BREDR:
3125                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3126                 break;
3127         case HCI_AMP:
3128                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3129                 break;
3130         default:
3131                 return -EINVAL;
3132         }
3133 
3134         if (id < 0)
3135                 return id;
3136 
3137         sprintf(hdev->name, "hci%d", id);
3138         hdev->id = id;
3139 
3140         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3141 
3142         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3143                                           WQ_MEM_RECLAIM, 1, hdev->name);
3144         if (!hdev->workqueue) {
3145                 error = -ENOMEM;
3146                 goto err;
3147         }
3148 
3149         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3150                                               WQ_MEM_RECLAIM, 1, hdev->name);
3151         if (!hdev->req_workqueue) {
3152                 destroy_workqueue(hdev->workqueue);
3153                 error = -ENOMEM;
3154                 goto err;
3155         }
3156 
3157         if (!IS_ERR_OR_NULL(bt_debugfs))
3158                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3159 
3160         dev_set_name(&hdev->dev, "%s", hdev->name);
3161 
3162         error = device_add(&hdev->dev);
3163         if (error < 0)
3164                 goto err_wqueue;
3165 
3166         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3167                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3168                                     hdev);
3169         if (hdev->rfkill) {
3170                 if (rfkill_register(hdev->rfkill) < 0) {
3171                         rfkill_destroy(hdev->rfkill);
3172                         hdev->rfkill = NULL;
3173                 }
3174         }
3175 
3176         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3177                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3178 
3179         hci_dev_set_flag(hdev, HCI_SETUP);
3180         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3181 
3182         if (hdev->dev_type == HCI_BREDR) {
3183                 /* Assume BR/EDR support until proven otherwise (such as
3184                  * through reading the supported features during init).
3185                  */
3186                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3187         }
3188 
3189         write_lock(&hci_dev_list_lock);
3190         list_add(&hdev->list, &hci_dev_list);
3191         write_unlock(&hci_dev_list_lock);
3192 
3193         /* Devices that are marked for raw-only usage are unconfigured
3194          * and should not be included in normal operation.
3195          */
3196         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3197                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3198 
3199         hci_notify(hdev, HCI_DEV_REG);
3200         hci_dev_hold(hdev);
3201 
3202         queue_work(hdev->req_workqueue, &hdev->power_on);
3203 
3204         return id;
3205 
3206 err_wqueue:
3207         destroy_workqueue(hdev->workqueue);
3208         destroy_workqueue(hdev->req_workqueue);
3209 err:
3210         ida_simple_remove(&hci_index_ida, hdev->id);
3211 
3212         return error;
3213 }
3214 EXPORT_SYMBOL(hci_register_dev);
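
/* Editor's sketch (hypothetical driver code, not part of hci_core.c): a
 * minimal driver would pair hci_alloc_dev()/hci_register_dev() roughly as
 * below; my_open, my_close, my_send and my_probe are made-up names.
 */
static int my_open(struct hci_dev *hdev) { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }

static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* a real driver would hand the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static int my_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();
	int err;

	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = my_open;
	hdev->close = my_close;
	hdev->send  = my_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}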
3215 
3216 /* Unregister HCI device */
3217 void hci_unregister_dev(struct hci_dev *hdev)
3218 {
3219         int id;
3220 
3221         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3222 
3223         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3224 
3225         id = hdev->id;
3226 
3227         write_lock(&hci_dev_list_lock);
3228         list_del(&hdev->list);
3229         write_unlock(&hci_dev_list_lock);
3230 
3231         hci_dev_do_close(hdev);
3232 
3233         cancel_work_sync(&hdev->power_on);
3234 
3235         if (!test_bit(HCI_INIT, &hdev->flags) &&
3236             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3237             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3238                 hci_dev_lock(hdev);
3239                 mgmt_index_removed(hdev);
3240                 hci_dev_unlock(hdev);
3241         }
3242 
3243         /* mgmt_index_removed should take care of emptying the
3244          * pending list */
3245         BUG_ON(!list_empty(&hdev->mgmt_pending));
3246 
3247         hci_notify(hdev, HCI_DEV_UNREG);
3248 
3249         if (hdev->rfkill) {
3250                 rfkill_unregister(hdev->rfkill);
3251                 rfkill_destroy(hdev->rfkill);
3252         }
3253 
3254         device_del(&hdev->dev);
3255 
3256         debugfs_remove_recursive(hdev->debugfs);
3257 
3258         destroy_workqueue(hdev->workqueue);
3259         destroy_workqueue(hdev->req_workqueue);
3260 
3261         hci_dev_lock(hdev);
3262         hci_bdaddr_list_clear(&hdev->blacklist);
3263         hci_bdaddr_list_clear(&hdev->whitelist);
3264         hci_uuids_clear(hdev);
3265         hci_link_keys_clear(hdev);
3266         hci_smp_ltks_clear(hdev);
3267         hci_smp_irks_clear(hdev);
3268         hci_remote_oob_data_clear(hdev);
3269         hci_bdaddr_list_clear(&hdev->le_white_list);
3270         hci_conn_params_clear_all(hdev);
3271         hci_discovery_filter_clear(hdev);
3272         hci_dev_unlock(hdev);
3273 
3274         hci_dev_put(hdev);
3275 
3276         ida_simple_remove(&hci_index_ida, id);
3277 }
3278 EXPORT_SYMBOL(hci_unregister_dev);
3279 
3280 /* Suspend HCI device */
3281 int hci_suspend_dev(struct hci_dev *hdev)
3282 {
3283         hci_notify(hdev, HCI_DEV_SUSPEND);
3284         return 0;
3285 }
3286 EXPORT_SYMBOL(hci_suspend_dev);
3287 
3288 /* Resume HCI device */
3289 int hci_resume_dev(struct hci_dev *hdev)
3290 {
3291         hci_notify(hdev, HCI_DEV_RESUME);
3292         return 0;
3293 }
3294 EXPORT_SYMBOL(hci_resume_dev);
3295 
3296 /* Reset HCI device */
3297 int hci_reset_dev(struct hci_dev *hdev)
3298 {
3299         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3300         struct sk_buff *skb;
3301 
3302         skb = bt_skb_alloc(3, GFP_ATOMIC);
3303         if (!skb)
3304                 return -ENOMEM;
3305 
3306         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3307         memcpy(skb_put(skb, 3), hw_err, 3);
3308 
3309         /* Send Hardware Error to upper stack */
3310         return hci_recv_frame(hdev, skb);
3311 }
3312 EXPORT_SYMBOL(hci_reset_dev);
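
/* Editor's note: the three bytes above form one complete HCI event packet:
 * event code HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01, and a
 * single hardware_code parameter byte of 0x00.
 */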
3313 
3314 /* Receive frame from HCI drivers */
3315 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3316 {
3317         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3318                       !test_bit(HCI_INIT, &hdev->flags))) {
3319                 kfree_skb(skb);
3320                 return -ENXIO;
3321         }
3322 
3323         /* Incoming skb */
3324         bt_cb(skb)->incoming = 1;
3325 
3326         /* Time stamp */
3327         __net_timestamp(skb);
3328 
3329         skb_queue_tail(&hdev->rx_q, skb);
3330         queue_work(hdev->workqueue, &hdev->rx_work);
3331 
3332         return 0;
3333 }
3334 EXPORT_SYMBOL(hci_recv_frame);
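
/* Editor's sketch (hypothetical driver code, not part of this file): a
 * driver feeds a frame read from its transport into the core; buf/count
 * are assumed to hold one complete packet preceded by its HCI packet type
 * byte, and my_recv is a made-up name.
 */
static int my_recv(struct hci_dev *hdev, const u8 *buf, size_t count)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(count - 1, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = buf[0];
	memcpy(skb_put(skb, count - 1), buf + 1, count - 1);

	/* hci_recv_frame() consumes the skb even on error */
	return hci_recv_frame(hdev, skb);
}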
3335 
3336 /* ---- Interface to upper protocols ---- */
3337 
3338 int hci_register_cb(struct hci_cb *cb)
3339 {
3340         BT_DBG("%p name %s", cb, cb->name);
3341 
3342         mutex_lock(&hci_cb_list_lock);
3343         list_add_tail(&cb->list, &hci_cb_list);
3344         mutex_unlock(&hci_cb_list_lock);
3345 
3346         return 0;
3347 }
3348 EXPORT_SYMBOL(hci_register_cb);
3349 
3350 int hci_unregister_cb(struct hci_cb *cb)
3351 {
3352         BT_DBG("%p name %s", cb, cb->name);
3353 
3354         mutex_lock(&hci_cb_list_lock);
3355         list_del(&cb->list);
3356         mutex_unlock(&hci_cb_list_lock);
3357 
3358         return 0;
3359 }
3360 EXPORT_SYMBOL(hci_unregister_cb);
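
/* Editor's sketch (hypothetical, not part of this file): an upper protocol
 * hooks into connection events with a static struct hci_cb; MYPROTO,
 * my_connect_cfm, my_init and my_exit are made-up names.
 */
static void my_connect_cfm(struct hci_conn *conn, __u8 status)
{
	if (!status)
		BT_DBG("new connection %pMR", &conn->dst);
}

static struct hci_cb my_cb = {
	.name		= "MYPROTO",
	.connect_cfm	= my_connect_cfm,
};

static int __init my_init(void)
{
	return hci_register_cb(&my_cb);
}

static void __exit my_exit(void)
{
	hci_unregister_cb(&my_cb);
}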
3361 
3362 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3363 {
3364         int err;
3365 
3366         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3367 
3368         /* Time stamp */
3369         __net_timestamp(skb);
3370 
3371         /* Send copy to monitor */
3372         hci_send_to_monitor(hdev, skb);
3373 
3374         if (atomic_read(&hdev->promisc)) {
3375                 /* Send copy to the sockets */
3376                 hci_send_to_sock(hdev, skb);
3377         }
3378 
3379         /* Get rid of the skb owner prior to sending to the driver. */
3380         skb_orphan(skb);
3381 
3382         err = hdev->send(hdev, skb);
3383         if (err < 0) {
3384                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3385                 kfree_skb(skb);
3386         }
3387 }
3388 
3389 /* Send HCI command */
3390 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3391                  const void *param)
3392 {
3393         struct sk_buff *skb;
3394 
3395         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3396 
3397         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3398         if (!skb) {
3399                 BT_ERR("%s no memory for command", hdev->name);
3400                 return -ENOMEM;
3401         }
3402 
3403         /* Stand-alone HCI commands must be flagged as
3404          * single-command requests.
3405          */
3406         bt_cb(skb)->req.start = true;
3407 
3408         skb_queue_tail(&hdev->cmd_q, skb);
3409         queue_work(hdev->workqueue, &hdev->cmd_work);
3410 
3411         return 0;
3412 }
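
/* Editor's sketch (hypothetical call sites, not part of this file): issuing
 * a parameterless command and one carrying a parameter block.
 */
static void my_example_cmds(struct hci_dev *hdev)
{
	struct hci_cp_write_local_name cp;

	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	memset(&cp, 0, sizeof(cp));
	memcpy(cp.name, "example", 7);
	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}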
3413 
3414 /* Get data from the previously sent command */
3415 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3416 {
3417         struct hci_command_hdr *hdr;
3418 
3419         if (!hdev->sent_cmd)
3420                 return NULL;
3421 
3422         hdr = (void *) hdev->sent_cmd->data;
3423 
3424         if (hdr->opcode != cpu_to_le16(opcode))
3425                 return NULL;
3426 
3427         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3428 
3429         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3430 }
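
/* Editor's sketch (hypothetical, not part of this file): a command-complete
 * handler in the style of hci_event.c recovering the parameters of the
 * command it acknowledges; my_cc_write_local_name is a made-up name.
 */
static void my_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_write_local_name *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;	/* event does not match the last sent command */

	BT_DBG("%s name %s", hdev->name, sent->name);
}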
3431 
3432 /* Send ACL data */
3433 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3434 {
3435         struct hci_acl_hdr *hdr;
3436         int len = skb->len;
3437 
3438         skb_push(skb, HCI_ACL_HDR_SIZE);
3439         skb_reset_transport_header(skb);
3440         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3441         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3442         hdr->dlen   = cpu_to_le16(len);
3443 }
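
/* Editor's note: per include/net/bluetooth/hci.h, the handle field packs a
 * 12-bit connection handle with 4 bits of packet boundary/broadcast flags:
 *
 *	hci_handle_pack(h, f)	((__u16) ((h & 0x0fff) | (f << 12)))
 *	hci_handle(h)		(h & 0x0fff)
 *	hci_flags(h)		(h >> 12)
 */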
3444 
3445 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3446                           struct sk_buff *skb, __u16 flags)
3447 {
3448         struct hci_conn *conn = chan->conn;
3449         struct hci_dev *hdev = conn->hdev;
3450         struct sk_buff *list;
3451 
3452         skb->len = skb_headlen(skb);
3453         skb->data_len = 0;
3454 
3455         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3456 
3457         switch (hdev->dev_type) {
3458         case HCI_BREDR:
3459                 hci_add_acl_hdr(skb, conn->handle, flags);
3460                 break;
3461         case HCI_AMP:
3462                 hci_add_acl_hdr(skb, chan->handle, flags);
3463                 break;
3464         default:
3465                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3466                 return;
3467         }
3468 
3469         list = skb_shinfo(skb)->frag_list;
3470         if (!list) {
3471                 /* Non fragmented */
3472                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3473 
3474                 skb_queue_tail(queue, skb);
3475         } else {
3476                 /* Fragmented */
3477                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3478 
3479                 skb_shinfo(skb)->frag_list = NULL;
3480 
3481                 /* Queue all fragments atomically. We need to use
3482                  * spin_lock_bh here because with 6LoWPAN links this
3483                  * function may be called from softirq context, where
3484                  * taking the plain spin lock could cause deadlocks.
3485                  */
3486                 spin_lock_bh(&queue->lock);
3487 
3488                 __skb_queue_tail(queue, skb);
3489 
3490                 flags &= ~ACL_START;
3491                 flags |= ACL_CONT;
3492                 do {
3493                         skb = list; list = list->next;
3494 
3495                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3496                         hci_add_acl_hdr(skb, conn->handle, flags);
3497 
3498                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3499 
3500                         __skb_queue_tail(queue, skb);
3501                 } while (list);
3502 
3503                 spin_unlock_bh(&queue->lock);
3504         }
3505 }
3506 
3507 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3508 {
3509         struct hci_dev *hdev = chan->conn->hdev;
3510 
3511         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3512 
3513         hci_queue_acl(chan, &chan->data_q, skb, flags);
3514 
3515         queue_work(hdev->workqueue, &hdev->tx_work);
3516 }
3517 
3518 /* Send SCO data */
3519 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3520 {
3521         struct hci_dev *hdev = conn->hdev;
3522         struct hci_sco_hdr hdr;
3523 
3524         BT_DBG("%s len %d", hdev->name, skb->len);
3525 
3526         hdr.handle = cpu_to_le16(conn->handle);
3527         hdr.dlen   = skb->len;
3528 
3529         skb_push(skb, HCI_SCO_HDR_SIZE);
3530         skb_reset_transport_header(skb);
3531         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3532 
3533         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3534 
3535         skb_queue_tail(&conn->data_q, skb);
3536         queue_work(hdev->workqueue, &hdev->tx_work);
3537 }
3538 
3539 /* ---- HCI TX task (outgoing data) ---- */
3540 
3541 /* HCI Connection scheduler */
3542 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3543                                      int *quote)
3544 {
3545         struct hci_conn_hash *h = &hdev->conn_hash;
3546         struct hci_conn *conn = NULL, *c;
3547         unsigned int num = 0, min = ~0;
3548 
3549         /* We don't have to lock the device here. Connections are
3550          * always added and removed with the TX task disabled. */
3551 
3552         rcu_read_lock();
3553 
3554         list_for_each_entry_rcu(c, &h->list, list) {
3555                 if (c->type != type || skb_queue_empty(&c->data_q))
3556                         continue;
3557 
3558                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3559                         continue;
3560 
3561                 num++;
3562 
3563                 if (c->sent < min) {
3564                         min  = c->sent;
3565                         conn = c;
3566                 }
3567 
3568                 if (hci_conn_num(hdev, type) == num)
3569                         break;
3570         }
3571 
3572         rcu_read_unlock();
3573 
3574         if (conn) {
3575                 int cnt, q;
3576 
3577                 switch (conn->type) {
3578                 case ACL_LINK:
3579                         cnt = hdev->acl_cnt;
3580                         break;
3581                 case SCO_LINK:
3582                 case ESCO_LINK:
3583                         cnt = hdev->sco_cnt;
3584                         break;
3585                 case LE_LINK:
3586                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3587                         break;
3588                 default:
3589                         cnt = 0;
3590                         BT_ERR("Unknown link type");
3591                 }
3592 
3593                 q = cnt / num;
3594                 *quote = q ? q : 1;
3595         } else {
3596                 *quote = 0;
3597         }
3598         BT_DBG("conn %p quote %d", conn, *quote);
3599         return conn;
3600 }
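
/* Editor's note (worked example, not part of the original source): with
 * three ACL connections queuing data and hdev->acl_cnt == 8 free packet
 * slots, the least-used connection is picked and quoted q = 8 / 3 = 2
 * packets; a zero quotient is rounded up to 1 so progress is always made.
 */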
3601 
3602 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3603 {
3604         struct hci_conn_hash *h = &hdev->conn_hash;
3605         struct hci_conn *c;
3606 
3607         BT_ERR("%s link tx timeout", hdev->name);
3608 
3609         rcu_read_lock();
3610 
3611         /* Kill stalled connections */
3612         list_for_each_entry_rcu(c, &h->list, list) {
3613                 if (c->type == type && c->sent) {
3614                         BT_ERR("%s killing stalled connection %pMR",
3615                                hdev->name, &c->dst);
3616                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3617                 }
3618         }
3619 
3620         rcu_read_unlock();
3621 }
3622 
3623 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3624                                       int *quote)
3625 {
3626         struct hci_conn_hash *h = &hdev->conn_hash;
3627         struct hci_chan *chan = NULL;
3628         unsigned int num = 0, min = ~0, cur_prio = 0;
3629         struct hci_conn *conn;
3630         int cnt, q, conn_num = 0;
3631 
3632         BT_DBG("%s", hdev->name);
3633 
3634         rcu_read_lock();
3635 
3636         list_for_each_entry_rcu(conn, &h->list, list) {
3637                 struct hci_chan *tmp;
3638 
3639                 if (conn->type != type)
3640                         continue;
3641 
3642                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3643                         continue;
3644 
3645                 conn_num++;
3646 
3647                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3648                         struct sk_buff *skb;
3649 
3650                         if (skb_queue_empty(&tmp->data_q))
3651                                 continue;
3652 
3653                         skb = skb_peek(&tmp->data_q);
3654                         if (skb->priority < cur_prio)
3655                                 continue;
3656 
3657                         if (skb->priority > cur_prio) {
3658                                 num = 0;
3659                                 min = ~0;
3660                                 cur_prio = skb->priority;
3661                         }
3662 
3663                         num++;
3664 
3665                         if (conn->sent < min) {
3666                                 min  = conn->sent;
3667                                 chan = tmp;
3668                         }
3669                 }
3670 
3671                 if (hci_conn_num(hdev, type) == conn_num)
3672                         break;
3673         }
3674 
3675         rcu_read_unlock();
3676 
3677         if (!chan)
3678                 return NULL;
3679 
3680         switch (chan->conn->type) {
3681         case ACL_LINK:
3682                 cnt = hdev->acl_cnt;
3683                 break;
3684         case AMP_LINK:
3685                 cnt = hdev->block_cnt;
3686                 break;
3687         case SCO_LINK:
3688         case ESCO_LINK:
3689                 cnt = hdev->sco_cnt;
3690                 break;
3691         case LE_LINK:
3692                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3693                 break;
3694         default:
3695                 cnt = 0;
3696                 BT_ERR("Unknown link type");
3697         }
3698 
3699         q = cnt / num;
3700         *quote = q ? q : 1;
3701         BT_DBG("chan %p quote %d", chan, *quote);
3702         return chan;
3703 }
3704 
3705 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3706 {
3707         struct hci_conn_hash *h = &hdev->conn_hash;
3708         struct hci_conn *conn;
3709         int num = 0;
3710 
3711         BT_DBG("%s", hdev->name);
3712 
3713         rcu_read_lock();
3714 
3715         list_for_each_entry_rcu(conn, &h->list, list) {
3716                 struct hci_chan *chan;
3717 
3718                 if (conn->type != type)
3719                         continue;
3720 
3721                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3722                         continue;
3723 
3724                 num++;
3725 
3726                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3727                         struct sk_buff *skb;
3728 
3729                         if (chan->sent) {
3730                                 chan->sent = 0;
3731                                 continue;
3732                         }
3733 
3734                         if (skb_queue_empty(&chan->data_q))
3735                                 continue;
3736 
3737                         skb = skb_peek(&chan->data_q);
3738                         if (skb->priority >= HCI_PRIO_MAX - 1)
3739                                 continue;
3740 
3741                         skb->priority = HCI_PRIO_MAX - 1;
3742 
3743                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3744                                skb->priority);
3745                 }
3746 
3747                 if (hci_conn_num(hdev, type) == num)
3748                         break;
3749         }
3750 
3751         rcu_read_unlock();
3752 }
3754 
3755 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3756 {
3757         /* Calculate count of blocks used by this packet */
3758         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3759 }
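
/* Editor's note (worked example, not part of the original source): with a
 * controller block size of hdev->block_len == 64 bytes, an ACL payload of
 * skb->len - HCI_ACL_HDR_SIZE == 150 bytes occupies
 * DIV_ROUND_UP(150, 64) == 3 data blocks.
 */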
3760 
3761 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3762 {
3763         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3764                 /* ACL tx timeout must be longer than the maximum
3765                  * link supervision timeout (40.9 seconds). */
3766                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3767                                        HCI_ACL_TX_TIMEOUT))
3768                         hci_link_tx_to(hdev, ACL_LINK);
3769         }
3770 }
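
/* Editor's note: the maximum link supervision timeout is 0xffff slots * 0.625 ms
 * = 40.9 s; HCI_ACL_TX_TIMEOUT (45 s at the time of writing) is chosen to
 * exceed it, as the comment above requires.
 */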
3771 
3772 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3773 {
3774         unsigned int cnt = hdev->acl_cnt;
3775         struct hci_chan *chan;
3776         struct sk_buff *skb;
3777         int quote;
3778 
3779         __check_timeout(hdev, cnt);
3780 
3781         while (hdev->acl_cnt &&
3782                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3783                 u32 priority = (skb_peek(&chan->data_q))->priority;
3784                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3785                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3786                                skb->len, skb->priority);
3787 
3788                         /* Stop if priority has changed */
3789                         if (skb->priority < priority)
3790                                 break;
3791 
3792                         skb = skb_dequeue(&chan->data_q);
3793 
3794                         hci_conn_enter_active_mode(chan->conn,
3795                                                    bt_cb(skb)->force_active);
3796 
3797                         hci_send_frame(hdev, skb);
3798                         hdev->acl_last_tx = jiffies;
3799 
3800                         hdev->acl_cnt--;
3801                         chan->sent++;
3802                         chan->conn->sent++;
3803                 }
3804         }
3805 
3806         if (cnt != hdev->acl_cnt)
3807                 hci_prio_recalculate(hdev, ACL_LINK);
3808 }
3809 
3810 static void hci_sched_acl_blk(struct hci_dev *hdev)
3811 {
3812         unsigned int cnt = hdev->block_cnt;
3813         struct hci_chan *chan;
3814         struct sk_buff *skb;
3815         int quote;
3816         u8 type;
3817 
3818         __check_timeout(hdev, cnt);
3819 
3820         BT_DBG("%s", hdev->name);
3821 
3822         if (hdev->dev_type == HCI_AMP)
3823                 type = AMP_LINK;
3824         else
3825                 type = ACL_LINK;
3826 
3827         while (hdev->block_cnt > 0 &&
3828                (chan = hci_chan_sent(hdev, type, &quote))) {
3829                 u32 priority = (skb_peek(&chan->data_q))->priority;
3830                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3831                         int blocks;
3832 
3833                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3834                                skb->len, skb->priority);
3835 
3836                         /* Stop if priority has changed */
3837                         if (skb->priority < priority)
3838                                 break;
3839 
3840                         skb = skb_dequeue(&chan->data_q);
3841 
3842                         blocks = __get_blocks(hdev, skb);
3843                         if (blocks > hdev->block_cnt)
3844                                 return;
3845 
3846                         hci_conn_enter_active_mode(chan->conn,
3847                                                    bt_cb(skb)->force_active);
3848 
3849                         hci_send_frame(hdev, skb);
3850                         hdev->acl_last_tx = jiffies;
3851 
3852                         hdev->block_cnt -= blocks;
3853                         quote -= blocks;
3854 
3855                         chan->sent += blocks;
3856                         chan->conn->sent += blocks;
3857                 }
3858         }
3859 
3860         if (cnt != hdev->block_cnt)
3861                 hci_prio_recalculate(hdev, type);
3862 }
3863 
3864 static void hci_sched_acl(struct hci_dev *hdev)
3865 {
3866         BT_DBG("%s", hdev->name);
3867 
3868         /* No ACL links to schedule on a BR/EDR controller */
3869         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3870                 return;
3871 
3872         /* No AMP links to schedule on an AMP controller */
3873         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3874                 return;
3875 
3876         switch (hdev->flow_ctl_mode) {
3877         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3878                 hci_sched_acl_pkt(hdev);
3879                 break;
3880 
3881         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3882                 hci_sched_acl_blk(hdev);
3883                 break;
3884         }
3885 }
3886 
3887 /* Schedule SCO */
3888 static void hci_sched_sco(struct hci_dev *hdev)
3889 {
3890         struct hci_conn *conn;
3891         struct sk_buff *skb;
3892         int quote;
3893 
3894         BT_DBG("%s", hdev->name);
3895 
3896         if (!hci_conn_num(hdev, SCO_LINK))
3897                 return;
3898 
3899         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3900                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3901                         BT_DBG("skb %p len %d", skb, skb->len);
3902                         hci_send_frame(hdev, skb);
3903 
3904                         conn->sent++;
3905                         if (conn->sent == ~0)
3906                                 conn->sent = 0;
3907                 }
3908         }
3909 }
3910 
3911 static void hci_sched_esco(struct hci_dev *hdev)
3912 {
3913         struct hci_conn *conn;
3914         struct sk_buff *skb;
3915         int quote;
3916 
3917         BT_DBG("%s", hdev->name);
3918 
3919         if (!hci_conn_num(hdev, ESCO_LINK))
3920                 return;
3921 
3922         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3923                                                      &quote))) {
3924                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3925                         BT_DBG("skb %p len %d", skb, skb->len);
3926                         hci_send_frame(hdev, skb);
3927 
3928                         conn->sent++;
3929                         if (conn->sent == ~0)
3930                                 conn->sent = 0;
3931                 }
3932         }
3933 }
3934 
3935 static void hci_sched_le(struct hci_dev *hdev)
3936 {
3937         struct hci_chan *chan;
3938         struct sk_buff *skb;
3939         int quote, cnt, tmp;
3940 
3941         BT_DBG("%s", hdev->name);
3942 
3943         if (!hci_conn_num(hdev, LE_LINK))
3944                 return;
3945 
3946         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3947                 /* LE tx timeout must be longer than the maximum
3948                  * link supervision timeout (40.9 seconds). */
3949                 if (!hdev->le_cnt && hdev->le_pkts &&
3950                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3951                         hci_link_tx_to(hdev, LE_LINK);
3952         }
3953 
3954         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3955         tmp = cnt;
3956         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3957                 u32 priority = (skb_peek(&chan->data_q))->priority;
3958                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3959                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3960                                skb->len, skb->priority);
3961 
3962                         /* Stop if priority has changed */
3963                         if (skb->priority < priority)
3964                                 break;
3965 
3966                         skb = skb_dequeue(&chan->data_q);
3967 
3968                         hci_send_frame(hdev, skb);
3969                         hdev->le_last_tx = jiffies;
3970 
3971                         cnt--;
3972                         chan->sent++;
3973                         chan->conn->sent++;
3974                 }
3975         }
3976 
3977         if (hdev->le_pkts)
3978                 hdev->le_cnt = cnt;
3979         else
3980                 hdev->acl_cnt = cnt;
3981 
3982         if (cnt != tmp)
3983                 hci_prio_recalculate(hdev, LE_LINK);
3984 }
3985 
3986 static void hci_tx_work(struct work_struct *work)
3987 {
3988         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3989         struct sk_buff *skb;
3990 
3991         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3992                hdev->sco_cnt, hdev->le_cnt);
3993 
3994         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3995                 /* Schedule queues and send stuff to HCI driver */
3996                 hci_sched_acl(hdev);
3997                 hci_sched_sco(hdev);
3998                 hci_sched_esco(hdev);
3999                 hci_sched_le(hdev);
4000         }
4001 
4002         /* Send next queued raw (unknown type) packet */
4003         while ((skb = skb_dequeue(&hdev->raw_q)))
4004                 hci_send_frame(hdev, skb);
4005 }
4006 
4007 /* ----- HCI RX task (incoming data processing) ----- */
4008 
4009 /* ACL data packet */
4010 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4011 {
4012         struct hci_acl_hdr *hdr = (void *) skb->data;
4013         struct hci_conn *conn;
4014         __u16 handle, flags;
4015 
4016         skb_pull(skb, HCI_ACL_HDR_SIZE);
4017 
4018         handle = __le16_to_cpu(hdr->handle);
4019         flags  = hci_flags(handle);
4020         handle = hci_handle(handle);
4021 
4022         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4023                handle, flags);
4024 
4025         hdev->stat.acl_rx++;
4026 
4027         hci_dev_lock(hdev);
4028         conn = hci_conn_hash_lookup_handle(hdev, handle);
4029         hci_dev_unlock(hdev);
4030 
4031         if (conn) {
4032                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4033 
4034                 /* Send to upper protocol */
4035                 l2cap_recv_acldata(conn, skb, flags);
4036                 return;
4037         }
4038 
4039         BT_ERR("%s ACL packet for unknown connection handle %d",
4040                hdev->name, handle);
4041 
4042         kfree_skb(skb);
4043 }
4044 
4045 /* SCO data packet */
4046 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4047 {
4048         struct hci_sco_hdr *hdr = (void *) skb->data;
4049         struct hci_conn *conn;
4050         __u16 handle;
4051 
4052         skb_pull(skb, HCI_SCO_HDR_SIZE);
4053 
4054         handle = __le16_to_cpu(hdr->handle);
4055 
4056         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4057 
4058         hdev->stat.sco_rx++;
4059 
4060         hci_dev_lock(hdev);
4061         conn = hci_conn_hash_lookup_handle(hdev, handle);
4062         hci_dev_unlock(hdev);
4063 
4064         if (conn) {
4065                 /* Send to upper protocol */
4066                 sco_recv_scodata(conn, skb);
4067                 return;
4068         }
4069 
4070         BT_ERR("%s SCO packet for unknown connection handle %d",
4071                hdev->name, handle);
4072 
4073         kfree_skb(skb);
4074 }
4075 
4076 static bool hci_req_is_complete(struct hci_dev *hdev)
4077 {
4078         struct sk_buff *skb;
4079 
4080         skb = skb_peek(&hdev->cmd_q);
4081         if (!skb)
4082                 return true;
4083 
4084         return bt_cb(skb)->req.start;
4085 }
4086 
4087 static void hci_resend_last(struct hci_dev *hdev)
4088 {
4089         struct hci_command_hdr *sent;
4090         struct sk_buff *skb;
4091         u16 opcode;
4092 
4093         if (!hdev->sent_cmd)
4094                 return;
4095 
4096         sent = (void *) hdev->sent_cmd->data;
4097         opcode = __le16_to_cpu(sent->opcode);
4098         if (opcode == HCI_OP_RESET)
4099                 return;
4100 
4101         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4102         if (!skb)
4103                 return;
4104 
4105         skb_queue_head(&hdev->cmd_q, skb);
4106         queue_work(hdev->workqueue, &hdev->cmd_work);
4107 }
4108 
4109 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4110                           hci_req_complete_t *req_complete,
4111                           hci_req_complete_skb_t *req_complete_skb)
4112 {
4113         struct sk_buff *skb;
4114         unsigned long flags;
4115 
4116         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4117 
4118         /* If the completed command doesn't match the last one that was
4119          * sent, we need to do special handling of it.
4120          */
4121         if (!hci_sent_cmd_data(hdev, opcode)) {
4122                 /* Some CSR based controllers generate a spontaneous
4123                  * reset complete event during init and any pending
4124                  * command will never be completed. In such a case we
4125                  * need to resend whatever was the last sent
4126                  * command.
4127                  */
4128                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4129                         hci_resend_last(hdev);
4130 
4131                 return;
4132         }
4133 
4134         /* If the command succeeded and there are still more commands in
4135          * this request, the request is not yet complete.
4136          */
4137         if (!status && !hci_req_is_complete(hdev))
4138                 return;
4139 
4140         /* If this was the last command in a request, the complete
4141          * callback would be found in hdev->sent_cmd instead of the
4142          * command queue (hdev->cmd_q).
4143          */
4144         if (bt_cb(hdev->sent_cmd)->req.complete) {
4145                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4146                 return;
4147         }
4148 
4149         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4150                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4151                 return;
4152         }
4153 
4154         /* Remove all pending commands belonging to this request */
4155         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4156         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4157                 if (bt_cb(skb)->req.start) {
4158                         __skb_queue_head(&hdev->cmd_q, skb);
4159                         break;
4160                 }
4161 
4162                 *req_complete = bt_cb(skb)->req.complete;
4163                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4164                 kfree_skb(skb);
4165         }
4166         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4167 }
4168 
4169 static void hci_rx_work(struct work_struct *work)
4170 {
4171         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4172         struct sk_buff *skb;
4173 
4174         BT_DBG("%s", hdev->name);
4175 
4176         while ((skb = skb_dequeue(&hdev->rx_q))) {
4177                 /* Send copy to monitor */
4178                 hci_send_to_monitor(hdev, skb);
4179 
4180                 if (atomic_read(&hdev->promisc)) {
4181                         /* Send copy to the sockets */
4182                         hci_send_to_sock(hdev, skb);
4183                 }
4184 
4185                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4186                         kfree_skb(skb);
4187                         continue;
4188                 }
4189 
4190                 if (test_bit(HCI_INIT, &hdev->flags)) {
4191                         /* Don't process data packets in this state. */
4192                         switch (bt_cb(skb)->pkt_type) {
4193                         case HCI_ACLDATA_PKT:
4194                         case HCI_SCODATA_PKT:
4195                                 kfree_skb(skb);
4196                                 continue;
4197                         }
4198                 }
4199 
4200                 /* Process frame */
4201                 switch (bt_cb(skb)->pkt_type) {
4202                 case HCI_EVENT_PKT:
4203                         BT_DBG("%s Event packet", hdev->name);
4204                         hci_event_packet(hdev, skb);
4205                         break;
4206 
4207                 case HCI_ACLDATA_PKT:
4208                         BT_DBG("%s ACL data packet", hdev->name);
4209                         hci_acldata_packet(hdev, skb);
4210                         break;
4211 
4212                 case HCI_SCODATA_PKT:
4213                         BT_DBG("%s SCO data packet", hdev->name);
4214                         hci_scodata_packet(hdev, skb);
4215                         break;
4216 
4217                 default:
4218                         kfree_skb(skb);
4219                         break;
4220                 }
4221         }
4222 }
4223 
4224 static void hci_cmd_work(struct work_struct *work)
4225 {
4226         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4227         struct sk_buff *skb;
4228 
4229         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4230                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4231 
4232         /* Send queued commands */
4233         if (atomic_read(&hdev->cmd_cnt)) {
4234                 skb = skb_dequeue(&hdev->cmd_q);
4235                 if (!skb)
4236                         return;
4237 
4238                 kfree_skb(hdev->sent_cmd);
4239 
4240                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4241                 if (hdev->sent_cmd) {
4242                         atomic_dec(&hdev->cmd_cnt);
4243                         hci_send_frame(hdev, skb);
4244                         if (test_bit(HCI_RESET, &hdev->flags))
4245                                 cancel_delayed_work(&hdev->cmd_timer);
4246                         else
4247                                 schedule_delayed_work(&hdev->cmd_timer,
4248                                                       HCI_CMD_TIMEOUT);
4249                 } else {
4250                         skb_queue_head(&hdev->cmd_q, skb);
4251                         queue_work(hdev->workqueue, &hdev->cmd_work);
4252                 }
4253         }
4254 }
4255 
