
TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

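/* Completion callback shared by all synchronous requests: record the
 * result and wake up the waiter sleeping in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */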
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

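/* Take the last received event (hdev->recv_evt) and verify that it
 * matches the given opcode/event. Consumes the skb: on mismatch it is
 * freed and ERR_PTR(-ENODATA) is returned.
 */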
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

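/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires. Must be called from process context; the
 * caller owns the returned skb and must kfree_skb() it.
 */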
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
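
/* Illustrative sketch only (the opcode and parameter below are made up;
 * opcodes in the 0xfcXX range are vendor-specific): a transport driver
 * could issue a synchronous command during setup roughly like this:
 *
 *      skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), &param,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */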

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

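/* Build the Set Event Mask command, enabling only the events the
 * controller can actually generate according to its LMP features,
 * plus the LE event mask for LE capable controllers.
 */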
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

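/* Second stage of controller init: BR/EDR and LE setup, event mask,
 * SSP/EIR configuration and extended feature discovery.
 */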
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

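/* Third stage of controller init: stored link key cleanup, default
 * link policy, LE host support and the remaining feature pages.
 */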
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Only send HCI_Delete_Stored_Link_Key if it is supported */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

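/* Run the staged initialization sequence once the transport has been
 * opened. Each stage is executed as a synchronous request built from
 * the init request functions above.
 */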
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

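/* Handler for the HCIINQUIRY ioctl: optionally (re)start an inquiry,
 * wait for it to finish and copy the cached inquiry results back to
 * user space.
 */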
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

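/* Assemble LE advertising data into ptr: flags, TX power and the
 * (possibly shortened) local name. Returns the number of bytes used.
 */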
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

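/* Regenerate the advertising data and queue an LE Set Advertising Data
 * command, but only if the data actually changed.
 */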
void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

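/* Bring up an HCI device: open the transport, run the driver's setup
 * callback while HCI_SETUP is set and, unless the device is marked
 * raw, run the staged HCI init sequence.
 */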
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Check for rfkill but allow the HCI setup stage to proceed
         * (which in itself doesn't cause any RF activity).
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

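/* Tear down an HCI device: cancel pending work, flush all queues,
 * optionally send HCI_Reset and close the transport. Counterpart of
 * hci_dev_open().
 */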
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

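/* Handler for the simple HCISET* ioctls that tweak a single device
 * parameter, either through a synchronous HCI request or by setting
 * the field directly.
 */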
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                        hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

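/* Decide whether a link key should be stored persistently, based on
 * the key type and the bonding requirements of both sides.
 */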
1671 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1672                                u8 key_type, u8 old_key_type)
1673 {
1674         /* Legacy key */
1675         if (key_type < 0x03)
1676                 return true;
1677 
1678         /* Debug keys are insecure so don't store them persistently */
1679         if (key_type == HCI_LK_DEBUG_COMBINATION)
1680                 return false;
1681 
1682         /* Changed combination key and there's no previous one */
1683         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1684                 return false;
1685 
1686         /* Security mode 3 case */
1687         if (!conn)
1688                 return true;
1689 
1690         /* Neither the local nor the remote side had no-bonding as a requirement */
1691         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1692                 return true;
1693 
1694         /* The local side had dedicated bonding as a requirement */
1695         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1696                 return true;
1697 
1698         /* The remote side had dedicated bonding as a requirement */
1699         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1700                 return true;
1701 
1702         /* If none of the above criteria match, then don't store the key
1703          * persistently */
1704         return false;
1705 }
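
/* For reference, the authentication requirement values compared above are
 * defined by the Bluetooth specification as:
 *
 *      0x00  No Bonding                0x01  No Bonding + MITM
 *      0x02  Dedicated Bonding         0x03  Dedicated Bonding + MITM
 *      0x04  General Bonding           0x05  General Bonding + MITM
 *
 * so the "auth_type > 0x01" test above means some form of bonding was
 * requested by that side.
 */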
1706 
1707 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1708 {
1709         struct smp_ltk *k;
1710 
1711         list_for_each_entry(k, &hdev->long_term_keys, list) {
1712                 if (k->ediv != ediv ||
1713                     memcmp(rand, k->rand, sizeof(k->rand)))
1714                         continue;
1715 
1716                 return k;
1717         }
1718 
1719         return NULL;
1720 }
1721 
1722 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1723                                      u8 addr_type)
1724 {
1725         struct smp_ltk *k;
1726 
1727         list_for_each_entry(k, &hdev->long_term_keys, list)
1728                 if (addr_type == k->bdaddr_type &&
1729                     bacmp(bdaddr, &k->bdaddr) == 0)
1730                         return k;
1731 
1732         return NULL;
1733 }
1734 
1735 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1736                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1737 {
1738         struct link_key *key, *old_key;
1739         u8 old_key_type;
1740         bool persistent;
1741 
1742         old_key = hci_find_link_key(hdev, bdaddr);
1743         if (old_key) {
1744                 old_key_type = old_key->type;
1745                 key = old_key;
1746         } else {
1747                 old_key_type = conn ? conn->key_type : 0xff;
1748                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1749                 if (!key)
1750                         return -ENOMEM;
1751                 list_add(&key->list, &hdev->link_keys);
1752         }
1753 
1754         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1755 
1756         /* Some buggy controller combinations generate a changed
1757          * combination key for legacy pairing even when there's no
1758          * previous key */
1759         if (type == HCI_LK_CHANGED_COMBINATION &&
1760             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1761                 type = HCI_LK_COMBINATION;
1762                 if (conn)
1763                         conn->key_type = type;
1764         }
1765 
1766         bacpy(&key->bdaddr, bdaddr);
1767         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1768         key->pin_len = pin_len;
1769 
1770         if (type == HCI_LK_CHANGED_COMBINATION)
1771                 key->type = old_key_type;
1772         else
1773                 key->type = type;
1774 
1775         if (!new_key)
1776                 return 0;
1777 
1778         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1779 
1780         mgmt_new_link_key(hdev, key, persistent);
1781 
1782         if (conn)
1783                 conn->flush_key = !persistent;
1784 
1785         return 0;
1786 }
1787 
1788 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1789                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1790                 __le16 ediv, u8 rand[8])
1791 {
1792         struct smp_ltk *key, *old_key;
1793 
1794         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1795                 return 0;
1796 
1797         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1798         if (old_key)
1799                 key = old_key;
1800         else {
1801                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1802                 if (!key)
1803                         return -ENOMEM;
1804                 list_add(&key->list, &hdev->long_term_keys);
1805         }
1806 
1807         bacpy(&key->bdaddr, bdaddr);
1808         key->bdaddr_type = addr_type;
1809         memcpy(key->val, tk, sizeof(key->val));
1810         key->authenticated = authenticated;
1811         key->ediv = ediv;
1812         key->enc_size = enc_size;
1813         key->type = type;
1814         memcpy(key->rand, rand, sizeof(key->rand));
1815 
1816         if (!new_key)
1817                 return 0;
1818 
1819         if (type & HCI_SMP_LTK)
1820                 mgmt_new_ltk(hdev, key, 1);
1821 
1822         return 0;
1823 }
1824 
1825 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1826 {
1827         struct link_key *key;
1828 
1829         key = hci_find_link_key(hdev, bdaddr);
1830         if (!key)
1831                 return -ENOENT;
1832 
1833         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1834 
1835         list_del(&key->list);
1836         kfree(key);
1837 
1838         return 0;
1839 }
1840 
1841 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1842 {
1843         struct smp_ltk *k, *tmp;
1844 
1845         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1846                 if (bacmp(bdaddr, &k->bdaddr))
1847                         continue;
1848 
1849                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1850 
1851                 list_del(&k->list);
1852                 kfree(k);
1853         }
1854 
1855         return 0;
1856 }
1857 
1858 /* HCI command timer function */
1859 static void hci_cmd_timeout(unsigned long arg)
1860 {
1861         struct hci_dev *hdev = (void *) arg;
1862 
1863         if (hdev->sent_cmd) {
1864                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1865                 u16 opcode = __le16_to_cpu(sent->opcode);
1866 
1867                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1868         } else {
1869                 BT_ERR("%s command tx timeout", hdev->name);
1870         }
1871 
1872         atomic_set(&hdev->cmd_cnt, 1);
1873         queue_work(hdev->workqueue, &hdev->cmd_work);
1874 }
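
/* A minimal sketch of how this watchdog is driven; the actual arm/rearm
 * happens in the command work handler, so treat this as illustrative and
 * HCI_CMD_TIMEOUT as the assumed timeout constant from hci.h:
 *
 *      mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *      ...
 *      del_timer(&hdev->cmd_timer);    // once the command completes
 */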
1875 
1876 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1877                                           bdaddr_t *bdaddr)
1878 {
1879         struct oob_data *data;
1880 
1881         list_for_each_entry(data, &hdev->remote_oob_data, list)
1882                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1883                         return data;
1884 
1885         return NULL;
1886 }
1887 
1888 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1889 {
1890         struct oob_data *data;
1891 
1892         data = hci_find_remote_oob_data(hdev, bdaddr);
1893         if (!data)
1894                 return -ENOENT;
1895 
1896         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1897 
1898         list_del(&data->list);
1899         kfree(data);
1900 
1901         return 0;
1902 }
1903 
1904 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1905 {
1906         struct oob_data *data, *n;
1907 
1908         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1909                 list_del(&data->list);
1910                 kfree(data);
1911         }
1912 
1913         return 0;
1914 }
1915 
1916 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1917                             u8 *randomizer)
1918 {
1919         struct oob_data *data;
1920 
1921         data = hci_find_remote_oob_data(hdev, bdaddr);
1922 
1923         if (!data) {
1924                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1925                 if (!data)
1926                         return -ENOMEM;
1927 
1928                 bacpy(&data->bdaddr, bdaddr);
1929                 list_add(&data->list, &hdev->remote_oob_data);
1930         }
1931 
1932         memcpy(data->hash, hash, sizeof(data->hash));
1933         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1934 
1935         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1936 
1937         return 0;
1938 }
1939 
1940 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1941 {
1942         struct bdaddr_list *b;
1943 
1944         list_for_each_entry(b, &hdev->blacklist, list)
1945                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1946                         return b;
1947 
1948         return NULL;
1949 }
1950 
1951 int hci_blacklist_clear(struct hci_dev *hdev)
1952 {
1953         struct list_head *p, *n;
1954 
1955         list_for_each_safe(p, n, &hdev->blacklist) {
1956                 struct bdaddr_list *b;
1957 
1958                 b = list_entry(p, struct bdaddr_list, list);
1959 
1960                 list_del(p);
1961                 kfree(b);
1962         }
1963 
1964         return 0;
1965 }
1966 
1967 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1968 {
1969         struct bdaddr_list *entry;
1970 
1971         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1972                 return -EBADF;
1973 
1974         if (hci_blacklist_lookup(hdev, bdaddr))
1975                 return -EEXIST;
1976 
1977         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1978         if (!entry)
1979                 return -ENOMEM;
1980 
1981         bacpy(&entry->bdaddr, bdaddr);
1982 
1983         list_add(&entry->list, &hdev->blacklist);
1984 
1985         return mgmt_device_blocked(hdev, bdaddr, type);
1986 }
1987 
1988 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1989 {
1990         struct bdaddr_list *entry;
1991 
1992         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1993                 return hci_blacklist_clear(hdev);
1994 
1995         entry = hci_blacklist_lookup(hdev, bdaddr);
1996         if (!entry)
1997                 return -ENOENT;
1998 
1999         list_del(&entry->list);
2000         kfree(entry);
2001 
2002         return mgmt_device_unblocked(hdev, bdaddr, type);
2003 }
2004 
2005 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
2006 {
2007         struct le_scan_params *param = (struct le_scan_params *) opt;
2008         struct hci_cp_le_set_scan_param cp;
2009 
2010         memset(&cp, 0, sizeof(cp));
2011         cp.type = param->type;
2012         cp.interval = cpu_to_le16(param->interval);
2013         cp.window = cpu_to_le16(param->window);
2014 
2015         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2016 }
2017 
2018 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2019 {
2020         struct hci_cp_le_set_scan_enable cp;
2021 
2022         memset(&cp, 0, sizeof(cp));
2023         cp.enable = LE_SCAN_ENABLE;
2024         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2025 
2026         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2027 }
2028 
2029 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2030                           u16 window, int timeout)
2031 {
2032         long timeo = msecs_to_jiffies(3000);
2033         struct le_scan_params param;
2034         int err;
2035 
2036         BT_DBG("%s", hdev->name);
2037 
2038         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2039                 return -EINPROGRESS;
2040 
2041         param.type = type;
2042         param.interval = interval;
2043         param.window = window;
2044 
2045         hci_req_lock(hdev);
2046 
2047         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2048                              timeo);
2049         if (!err)
2050                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2051 
2052         hci_req_unlock(hdev);
2053 
2054         if (err < 0)
2055                 return err;
2056 
2057         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2058                            timeout);
2059 
2060         return 0;
2061 }
2062 
2063 int hci_cancel_le_scan(struct hci_dev *hdev)
2064 {
2065         BT_DBG("%s", hdev->name);
2066 
2067         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2068                 return -EALREADY;
2069 
2070         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2071                 struct hci_cp_le_set_scan_enable cp;
2072 
2073                 /* Send HCI command to disable LE Scan */
2074                 memset(&cp, 0, sizeof(cp));
2075                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2076         }
2077 
2078         return 0;
2079 }
2080 
2081 static void le_scan_disable_work(struct work_struct *work)
2082 {
2083         struct hci_dev *hdev = container_of(work, struct hci_dev,
2084                                             le_scan_disable.work);
2085         struct hci_cp_le_set_scan_enable cp;
2086 
2087         BT_DBG("%s", hdev->name);
2088 
2089         memset(&cp, 0, sizeof(cp));
2090 
2091         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2092 }
2093 
2094 static void le_scan_work(struct work_struct *work)
2095 {
2096         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2097         struct le_scan_params *param = &hdev->le_scan_params;
2098 
2099         BT_DBG("%s", hdev->name);
2100 
2101         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2102                        param->timeout);
2103 }
2104 
2105 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2106                 int timeout)
2107 {
2108         struct le_scan_params *param = &hdev->le_scan_params;
2109 
2110         BT_DBG("%s", hdev->name);
2111 
2112         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2113                 return -ENOTSUPP;
2114 
2115         if (work_busy(&hdev->le_scan))
2116                 return -EINPROGRESS;
2117 
2118         param->type = type;
2119         param->interval = interval;
2120         param->window = window;
2121         param->timeout = timeout;
2122 
2123         queue_work(system_long_wq, &hdev->le_scan);
2124 
2125         return 0;
2126 }
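
/* A usage sketch for kicking off a scan, assuming the LE_SCAN_ACTIVE
 * constant from hci.h and parameter values in the style the mgmt code
 * uses (interval/window in 0.625 ms units, timeout in jiffies):
 *
 *      err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0012, 0x0012,
 *                        msecs_to_jiffies(10240));
 */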
2127 
2128 /* Alloc HCI device */
2129 struct hci_dev *hci_alloc_dev(void)
2130 {
2131         struct hci_dev *hdev;
2132 
2133         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2134         if (!hdev)
2135                 return NULL;
2136 
2137         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2138         hdev->esco_type = (ESCO_HV1);
2139         hdev->link_mode = (HCI_LM_ACCEPT);
2140         hdev->io_capability = 0x03; /* No Input No Output */
2141         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2142         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2143 
2144         hdev->sniff_max_interval = 800;
2145         hdev->sniff_min_interval = 80;
2146 
2147         mutex_init(&hdev->lock);
2148         mutex_init(&hdev->req_lock);
2149 
2150         INIT_LIST_HEAD(&hdev->mgmt_pending);
2151         INIT_LIST_HEAD(&hdev->blacklist);
2152         INIT_LIST_HEAD(&hdev->uuids);
2153         INIT_LIST_HEAD(&hdev->link_keys);
2154         INIT_LIST_HEAD(&hdev->long_term_keys);
2155         INIT_LIST_HEAD(&hdev->remote_oob_data);
2156         INIT_LIST_HEAD(&hdev->conn_hash.list);
2157 
2158         INIT_WORK(&hdev->rx_work, hci_rx_work);
2159         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2160         INIT_WORK(&hdev->tx_work, hci_tx_work);
2161         INIT_WORK(&hdev->power_on, hci_power_on);
2162         INIT_WORK(&hdev->le_scan, le_scan_work);
2163 
2164         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2165         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2166         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2167 
2168         skb_queue_head_init(&hdev->rx_q);
2169         skb_queue_head_init(&hdev->cmd_q);
2170         skb_queue_head_init(&hdev->raw_q);
2171 
2172         init_waitqueue_head(&hdev->req_wait_q);
2173 
2174         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2175 
2176         hci_init_sysfs(hdev);
2177         discovery_init(hdev);
2178 
2179         return hdev;
2180 }
2181 EXPORT_SYMBOL(hci_alloc_dev);
2182 
2183 /* Free HCI device */
2184 void hci_free_dev(struct hci_dev *hdev)
2185 {
2186         /* Will be freed via the device release callback */
2187         put_device(&hdev->dev);
2188 }
2189 EXPORT_SYMBOL(hci_free_dev);
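
/* A minimal driver-side sketch of the allocate/register/free life cycle;
 * my_open, my_close and my_send are hypothetical stand-ins for a real
 * transport driver's hooks:
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */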
2190 
2191 /* Register HCI device */
2192 int hci_register_dev(struct hci_dev *hdev)
2193 {
2194         int id, error;
2195 
2196         if (!hdev->open || !hdev->close)
2197                 return -EINVAL;
2198 
2199         /* Do not allow HCI_AMP devices to register at index 0,
2200          * so the index can be used as the AMP controller ID.
2201          */
2202         switch (hdev->dev_type) {
2203         case HCI_BREDR:
2204                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2205                 break;
2206         case HCI_AMP:
2207                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2208                 break;
2209         default:
2210                 return -EINVAL;
2211         }
2212 
2213         if (id < 0)
2214                 return id;
2215 
2216         sprintf(hdev->name, "hci%d", id);
2217         hdev->id = id;
2218 
2219         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2220 
2221         write_lock(&hci_dev_list_lock);
2222         list_add(&hdev->list, &hci_dev_list);
2223         write_unlock(&hci_dev_list_lock);
2224 
2225         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2226                                           WQ_MEM_RECLAIM, 1);
2227         if (!hdev->workqueue) {
2228                 error = -ENOMEM;
2229                 goto err;
2230         }
2231 
2232         hdev->req_workqueue = alloc_workqueue(hdev->name,
2233                                               WQ_HIGHPRI | WQ_UNBOUND |
2234                                               WQ_MEM_RECLAIM, 1);
2235         if (!hdev->req_workqueue) {
2236                 destroy_workqueue(hdev->workqueue);
2237                 error = -ENOMEM;
2238                 goto err;
2239         }
2240 
2241         error = hci_add_sysfs(hdev);
2242         if (error < 0)
2243                 goto err_wqueue;
2244 
2245         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2246                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2247                                     hdev);
2248         if (hdev->rfkill) {
2249                 if (rfkill_register(hdev->rfkill) < 0) {
2250                         rfkill_destroy(hdev->rfkill);
2251                         hdev->rfkill = NULL;
2252                 }
2253         }
2254 
2255         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2256                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2257 
2258         set_bit(HCI_SETUP, &hdev->dev_flags);
2259 
2260         if (hdev->dev_type != HCI_AMP)
2261                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2262 
2263         hci_notify(hdev, HCI_DEV_REG);
2264         hci_dev_hold(hdev);
2265 
2266         queue_work(hdev->req_workqueue, &hdev->power_on);
2267 
2268         return id;
2269 
2270 err_wqueue:
2271         destroy_workqueue(hdev->workqueue);
2272         destroy_workqueue(hdev->req_workqueue);
2273 err:
2274         ida_simple_remove(&hci_index_ida, hdev->id);
2275         write_lock(&hci_dev_list_lock);
2276         list_del(&hdev->list);
2277         write_unlock(&hci_dev_list_lock);
2278 
2279         return error;
2280 }
2281 EXPORT_SYMBOL(hci_register_dev);
2282 
2283 /* Unregister HCI device */
2284 void hci_unregister_dev(struct hci_dev *hdev)
2285 {
2286         int i, id;
2287 
2288         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2289 
2290         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2291 
2292         id = hdev->id;
2293 
2294         write_lock(&hci_dev_list_lock);
2295         list_del(&hdev->list);
2296         write_unlock(&hci_dev_list_lock);
2297 
2298         hci_dev_do_close(hdev);
2299 
2300         for (i = 0; i < NUM_REASSEMBLY; i++)
2301                 kfree_skb(hdev->reassembly[i]);
2302 
2303         cancel_work_sync(&hdev->power_on);
2304 
2305         if (!test_bit(HCI_INIT, &hdev->flags) &&
2306             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2307                 hci_dev_lock(hdev);
2308                 mgmt_index_removed(hdev);
2309                 hci_dev_unlock(hdev);
2310         }
2311 
2312         /* mgmt_index_removed should take care of emptying the
2313          * pending list */
2314         BUG_ON(!list_empty(&hdev->mgmt_pending));
2315 
2316         hci_notify(hdev, HCI_DEV_UNREG);
2317 
2318         if (hdev->rfkill) {
2319                 rfkill_unregister(hdev->rfkill);
2320                 rfkill_destroy(hdev->rfkill);
2321         }
2322 
2323         hci_del_sysfs(hdev);
2324 
2325         destroy_workqueue(hdev->workqueue);
2326         destroy_workqueue(hdev->req_workqueue);
2327 
2328         hci_dev_lock(hdev);
2329         hci_blacklist_clear(hdev);
2330         hci_uuids_clear(hdev);
2331         hci_link_keys_clear(hdev);
2332         hci_smp_ltks_clear(hdev);
2333         hci_remote_oob_data_clear(hdev);
2334         hci_dev_unlock(hdev);
2335 
2336         hci_dev_put(hdev);
2337 
2338         ida_simple_remove(&hci_index_ida, id);
2339 }
2340 EXPORT_SYMBOL(hci_unregister_dev);
2341 
2342 /* Suspend HCI device */
2343 int hci_suspend_dev(struct hci_dev *hdev)
2344 {
2345         hci_notify(hdev, HCI_DEV_SUSPEND);
2346         return 0;
2347 }
2348 EXPORT_SYMBOL(hci_suspend_dev);
2349 
2350 /* Resume HCI device */
2351 int hci_resume_dev(struct hci_dev *hdev)
2352 {
2353         hci_notify(hdev, HCI_DEV_RESUME);
2354         return 0;
2355 }
2356 EXPORT_SYMBOL(hci_resume_dev);
2357 
2358 /* Receive frame from HCI drivers */
2359 int hci_recv_frame(struct sk_buff *skb)
2360 {
2361         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2362         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2363                       !test_bit(HCI_INIT, &hdev->flags))) {
2364                 kfree_skb(skb);
2365                 return -ENXIO;
2366         }
2367 
2368         /* Incoming skb */
2369         bt_cb(skb)->incoming = 1;
2370 
2371         /* Time stamp */
2372         __net_timestamp(skb);
2373 
2374         skb_queue_tail(&hdev->rx_q, skb);
2375         queue_work(hdev->workqueue, &hdev->rx_work);
2376 
2377         return 0;
2378 }
2379 EXPORT_SYMBOL(hci_recv_frame);
2380 
2381 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2382                           int count, __u8 index)
2383 {
2384         int len = 0;
2385         int hlen = 0;
2386         int remain = count;
2387         struct sk_buff *skb;
2388         struct bt_skb_cb *scb;
2389 
2390         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2391             index >= NUM_REASSEMBLY)
2392                 return -EILSEQ;
2393 
2394         skb = hdev->reassembly[index];
2395 
2396         if (!skb) {
2397                 switch (type) {
2398                 case HCI_ACLDATA_PKT:
2399                         len = HCI_MAX_FRAME_SIZE;
2400                         hlen = HCI_ACL_HDR_SIZE;
2401                         break;
2402                 case HCI_EVENT_PKT:
2403                         len = HCI_MAX_EVENT_SIZE;
2404                         hlen = HCI_EVENT_HDR_SIZE;
2405                         break;
2406                 case HCI_SCODATA_PKT:
2407                         len = HCI_MAX_SCO_SIZE;
2408                         hlen = HCI_SCO_HDR_SIZE;
2409                         break;
2410                 }
2411 
2412                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2413                 if (!skb)
2414                         return -ENOMEM;
2415 
2416                 scb = (void *) skb->cb;
2417                 scb->expect = hlen;
2418                 scb->pkt_type = type;
2419 
2420                 skb->dev = (void *) hdev;
2421                 hdev->reassembly[index] = skb;
2422         }
2423 
2424         while (count) {
2425                 scb = (void *) skb->cb;
2426                 len = min_t(uint, scb->expect, count);
2427 
2428                 memcpy(skb_put(skb, len), data, len);
2429 
2430                 count -= len;
2431                 data += len;
2432                 scb->expect -= len;
2433                 remain = count;
2434 
2435                 switch (type) {
2436                 case HCI_EVENT_PKT:
2437                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2438                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2439                                 scb->expect = h->plen;
2440 
2441                                 if (skb_tailroom(skb) < scb->expect) {
2442                                         kfree_skb(skb);
2443                                         hdev->reassembly[index] = NULL;
2444                                         return -ENOMEM;
2445                                 }
2446                         }
2447                         break;
2448 
2449                 case HCI_ACLDATA_PKT:
2450                 if (skb->len == HCI_ACL_HDR_SIZE) {
2451                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2452                                 scb->expect = __le16_to_cpu(h->dlen);
2453 
2454                                 if (skb_tailroom(skb) < scb->expect) {
2455                                         kfree_skb(skb);
2456                                         hdev->reassembly[index] = NULL;
2457                                         return -ENOMEM;
2458                                 }
2459                         }
2460                         break;
2461 
2462                 case HCI_SCODATA_PKT:
2463                         if (skb->len == HCI_SCO_HDR_SIZE) {
2464                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2465                                 scb->expect = h->dlen;
2466 
2467                                 if (skb_tailroom(skb) < scb->expect) {
2468                                         kfree_skb(skb);
2469                                         hdev->reassembly[index] = NULL;
2470                                         return -ENOMEM;
2471                                 }
2472                         }
2473                         break;
2474                 }
2475 
2476                 if (scb->expect == 0) {
2477                         /* Complete frame */
2478 
2479                         bt_cb(skb)->pkt_type = type;
2480                         hci_recv_frame(skb);
2481 
2482                         hdev->reassembly[index] = NULL;
2483                         return remain;
2484                 }
2485         }
2486 
2487         return remain;
2488 }
2489 
2490 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2491 {
2492         int rem = 0;
2493 
2494         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2495                 return -EILSEQ;
2496 
2497         while (count) {
2498                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2499                 if (rem < 0)
2500                         return rem;
2501 
2502                 data += (count - rem);
2503                 count = rem;
2504         }
2505 
2506         return rem;
2507 }
2508 EXPORT_SYMBOL(hci_recv_fragment);
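
/* A sketch of a driver feeding received bytes through the reassembler,
 * assuming buf and len come straight from the transport interrupt path:
 *
 *      ret = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *      if (ret < 0)
 *              BT_ERR("%s frame reassembly failed", hdev->name);
 */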
2509 
2510 #define STREAM_REASSEMBLY 0
2511 
2512 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2513 {
2514         int type;
2515         int rem = 0;
2516 
2517         while (count) {
2518                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2519 
2520                 if (!skb) {
2521                         struct { char type; } *pkt;
2522 
2523                         /* Start of the frame */
2524                         pkt = data;
2525                         type = pkt->type;
2526 
2527                         data++;
2528                         count--;
2529                 } else
2530                         type = bt_cb(skb)->pkt_type;
2531 
2532                 rem = hci_reassembly(hdev, type, data, count,
2533                                      STREAM_REASSEMBLY);
2534                 if (rem < 0)
2535                         return rem;
2536 
2537                 data += (count - rem);
2538                 count = rem;
2539         }
2540 
2541         return rem;
2542 }
2543 EXPORT_SYMBOL(hci_recv_stream_fragment);
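
/* The stream variant expects UART (H:4) framing, where every frame is
 * prefixed with a one-byte packet type. For example, these seven bytes
 * carry one complete Command Complete event for HCI_OP_RESET:
 *
 *      0x04 0x0e 0x04 0x01 0x03 0x0c 0x00
 *       |    |    |    `-- ncmd, opcode (0x0c03), status
 *       |    |    `-- parameter length
 *       |    `-- event code (Command Complete)
 *       `-- HCI_EVENT_PKT
 */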
2544 
2545 /* ---- Interface to upper protocols ---- */
2546 
2547 int hci_register_cb(struct hci_cb *cb)
2548 {
2549         BT_DBG("%p name %s", cb, cb->name);
2550 
2551         write_lock(&hci_cb_list_lock);
2552         list_add(&cb->list, &hci_cb_list);
2553         write_unlock(&hci_cb_list_lock);
2554 
2555         return 0;
2556 }
2557 EXPORT_SYMBOL(hci_register_cb);
2558 
2559 int hci_unregister_cb(struct hci_cb *cb)
2560 {
2561         BT_DBG("%p name %s", cb, cb->name);
2562 
2563         write_lock(&hci_cb_list_lock);
2564         list_del(&cb->list);
2565         write_unlock(&hci_cb_list_lock);
2566 
2567         return 0;
2568 }
2569 EXPORT_SYMBOL(hci_unregister_cb);
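
/* A sketch of an upper-protocol hook, assuming the callback slots
 * declared for struct hci_cb in hci_core.h; my_security_cfm is a
 * hypothetical handler:
 *
 *      static struct hci_cb my_cb = {
 *              .name         = "my_proto",
 *              .security_cfm = my_security_cfm,
 *      };
 *
 *      hci_register_cb(&my_cb);
 */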
2570 
2571 static int hci_send_frame(struct sk_buff *skb)
2572 {
2573         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2574 
2575         if (!hdev) {
2576                 kfree_skb(skb);
2577                 return -ENODEV;
2578         }
2579 
2580         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2581 
2582         /* Time stamp */
2583         __net_timestamp(skb);
2584 
2585         /* Send copy to monitor */
2586         hci_send_to_monitor(hdev, skb);
2587 
2588         if (atomic_read(&hdev->promisc)) {
2589                 /* Send copy to the sockets */
2590                 hci_send_to_sock(hdev, skb);
2591         }
2592 
2593         /* Get rid of skb owner, prior to sending to the driver. */
2594         skb_orphan(skb);
2595 
2596         return hdev->send(skb);
2597 }
2598 
2599 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2600 {
2601         skb_queue_head_init(&req->cmd_q);
2602         req->hdev = hdev;
2603         req->err = 0;
2604 }
2605 
2606 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2607 {
2608         struct hci_dev *hdev = req->hdev;
2609         struct sk_buff *skb;
2610         unsigned long flags;
2611 
2612         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2613 
2614         /* If an error occurred during request building, remove all HCI
2615          * commands queued on the HCI request queue.
2616          */
2617         if (req->err) {
2618                 skb_queue_purge(&req->cmd_q);
2619                 return req->err;
2620         }
2621 
2622         /* Do not allow empty requests */
2623         if (skb_queue_empty(&req->cmd_q))
2624                 return -ENODATA;
2625 
2626         skb = skb_peek_tail(&req->cmd_q);
2627         bt_cb(skb)->req.complete = complete;
2628 
2629         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2630         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2631         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2632 
2633         queue_work(hdev->workqueue, &hdev->cmd_work);
2634 
2635         return 0;
2636 }
2637 
2638 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2639                                        u32 plen, const void *param)
2640 {
2641         int len = HCI_COMMAND_HDR_SIZE + plen;
2642         struct hci_command_hdr *hdr;
2643         struct sk_buff *skb;
2644 
2645         skb = bt_skb_alloc(len, GFP_ATOMIC);
2646         if (!skb)
2647                 return NULL;
2648 
2649         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2650         hdr->opcode = cpu_to_le16(opcode);
2651         hdr->plen   = plen;
2652 
2653         if (plen)
2654                 memcpy(skb_put(skb, plen), param, plen);
2655 
2656         BT_DBG("skb len %d", skb->len);
2657 
2658         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2659         skb->dev = (void *) hdev;
2660 
2661         return skb;
2662 }
2663 
2664 /* Send HCI command */
2665 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2666                  const void *param)
2667 {
2668         struct sk_buff *skb;
2669 
2670         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2671 
2672         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2673         if (!skb) {
2674                 BT_ERR("%s no memory for command", hdev->name);
2675                 return -ENOMEM;
2676         }
2677 
2678         /* Stand-alone HCI commands must be flagged as
2679          * single-command requests.
2680          */
2681         bt_cb(skb)->req.start = true;
2682 
2683         skb_queue_tail(&hdev->cmd_q, skb);
2684         queue_work(hdev->workqueue, &hdev->cmd_work);
2685 
2686         return 0;
2687 }
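
/* A fire-and-forget usage sketch with a single-byte parameter
 * (HCI_OP_WRITE_SSP_MODE takes one mode byte):
 *
 *      u8 mode = 0x01;
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
 */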
2688 
2689 /* Queue a command to an asynchronous HCI request */
2690 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2691                     const void *param, u8 event)
2692 {
2693         struct hci_dev *hdev = req->hdev;
2694         struct sk_buff *skb;
2695 
2696         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2697 
2698         /* If an error occurred during request building, there is no point in
2699          * queueing the HCI command. We can simply return.
2700          */
2701         if (req->err)
2702                 return;
2703 
2704         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2705         if (!skb) {
2706                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2707                        hdev->name, opcode);
2708                 req->err = -ENOMEM;
2709                 return;
2710         }
2711 
2712         if (skb_queue_empty(&req->cmd_q))
2713                 bt_cb(skb)->req.start = true;
2714 
2715         bt_cb(skb)->req.event = event;
2716 
2717         skb_queue_tail(&req->cmd_q, skb);
2718 }
2719 
2720 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2721                  const void *param)
2722 {
2723         hci_req_add_ev(req, opcode, plen, param, 0);
2724 }
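
/* A sketch tying the request helpers together; my_complete is a
 * hypothetical hci_req_complete_t callback:
 *
 *      struct hci_request req;
 *      u8 scan = SCAN_PAGE;
 *      int err;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *      err = hci_req_run(&req, my_complete);
 */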
2725 
2726 /* Get data from the previously sent command */
2727 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2728 {
2729         struct hci_command_hdr *hdr;
2730 
2731         if (!hdev->sent_cmd)
2732                 return NULL;
2733 
2734         hdr = (void *) hdev->sent_cmd->data;
2735 
2736         if (hdr->opcode != cpu_to_le16(opcode))
2737                 return NULL;
2738 
2739         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2740 
2741         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2742 }
2743 
2744 /* Send ACL data */
2745 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2746 {
2747         struct hci_acl_hdr *hdr;
2748         int len = skb->len;
2749 
2750         skb_push(skb, HCI_ACL_HDR_SIZE);
2751         skb_reset_transport_header(skb);
2752         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2753         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2754         hdr->dlen   = cpu_to_le16(len);
2755 }
2756 
2757 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2758                           struct sk_buff *skb, __u16 flags)
2759 {
2760         struct hci_conn *conn = chan->conn;
2761         struct hci_dev *hdev = conn->hdev;
2762         struct sk_buff *list;
2763 
2764         skb->len = skb_headlen(skb);
2765         skb->data_len = 0;
2766 
2767         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2768 
2769         switch (hdev->dev_type) {
2770         case HCI_BREDR:
2771                 hci_add_acl_hdr(skb, conn->handle, flags);
2772                 break;
2773         case HCI_AMP:
2774                 hci_add_acl_hdr(skb, chan->handle, flags);
2775                 break;
2776         default:
2777                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2778                 return;
2779         }
2780 
2781         list = skb_shinfo(skb)->frag_list;
2782         if (!list) {
2783                 /* Non-fragmented */
2784                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2785 
2786                 skb_queue_tail(queue, skb);
2787         } else {
2788                 /* Fragmented */
2789                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2790 
2791                 skb_shinfo(skb)->frag_list = NULL;
2792 
2793                 /* Queue all fragments atomically */
2794                 spin_lock(&queue->lock);
2795 
2796                 __skb_queue_tail(queue, skb);
2797 
2798                 flags &= ~ACL_START;
2799                 flags |= ACL_CONT;
2800                 do {
2801                         skb = list; list = list->next;
2802 
2803                         skb->dev = (void *) hdev;
2804                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2805                         hci_add_acl_hdr(skb, conn->handle, flags);
2806 
2807                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2808 
2809                         __skb_queue_tail(queue, skb);
2810                 } while (list);
2811 
2812                 spin_unlock(&queue->lock);
2813         }
2814 }
2815 
2816 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2817 {
2818         struct hci_dev *hdev = chan->conn->hdev;
2819 
2820         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2821 
2822         skb->dev = (void *) hdev;
2823 
2824         hci_queue_acl(chan, &chan->data_q, skb, flags);
2825 
2826         queue_work(hdev->workqueue, &hdev->tx_work);
2827 }
2828 
2829 /* Send SCO data */
2830 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2831 {
2832         struct hci_dev *hdev = conn->hdev;
2833         struct hci_sco_hdr hdr;
2834 
2835         BT_DBG("%s len %d", hdev->name, skb->len);
2836 
2837         hdr.handle = cpu_to_le16(conn->handle);
2838         hdr.dlen   = skb->len;
2839 
2840         skb_push(skb, HCI_SCO_HDR_SIZE);
2841         skb_reset_transport_header(skb);
2842         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2843 
2844         skb->dev = (void *) hdev;
2845         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2846 
2847         skb_queue_tail(&conn->data_q, skb);
2848         queue_work(hdev->workqueue, &hdev->tx_work);
2849 }
2850 
2851 /* ---- HCI TX task (outgoing data) ---- */
2852 
2853 /* HCI Connection scheduler */
2854 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2855                                      int *quote)
2856 {
2857         struct hci_conn_hash *h = &hdev->conn_hash;
2858         struct hci_conn *conn = NULL, *c;
2859         unsigned int num = 0, min = ~0;
2860 
2861         /* We don't have to lock the device here. Connections are always
2862          * added and removed with the TX task disabled. */
2863 
2864         rcu_read_lock();
2865 
2866         list_for_each_entry_rcu(c, &h->list, list) {
2867                 if (c->type != type || skb_queue_empty(&c->data_q))
2868                         continue;
2869 
2870                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2871                         continue;
2872 
2873                 num++;
2874 
2875                 if (c->sent < min) {
2876                         min  = c->sent;
2877                         conn = c;
2878                 }
2879 
2880                 if (hci_conn_num(hdev, type) == num)
2881                         break;
2882         }
2883 
2884         rcu_read_unlock();
2885 
2886         if (conn) {
2887                 int cnt, q;
2888 
2889                 switch (conn->type) {
2890                 case ACL_LINK:
2891                         cnt = hdev->acl_cnt;
2892                         break;
2893                 case SCO_LINK:
2894                 case ESCO_LINK:
2895                         cnt = hdev->sco_cnt;
2896                         break;
2897                 case LE_LINK:
2898                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2899                         break;
2900                 default:
2901                         cnt = 0;
2902                         BT_ERR("Unknown link type");
2903                 }
2904 
2905                 q = cnt / num;
2906                 *quote = q ? q : 1;
2907         } else
2908                 *quote = 0;
2909 
2910         BT_DBG("conn %p quote %d", conn, *quote);
2911         return conn;
2912 }
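
/* The quota math above is plain round-robin: for example, with nine ACL
 * credits left (cnt == 9) spread over two busy connections (num == 2),
 * the least loaded connection gets quote = 9 / 2 = 4 packets this round.
 */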
2913 
2914 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2915 {
2916         struct hci_conn_hash *h = &hdev->conn_hash;
2917         struct hci_conn *c;
2918 
2919         BT_ERR("%s link tx timeout", hdev->name);
2920 
2921         rcu_read_lock();
2922 
2923         /* Kill stalled connections */
2924         list_for_each_entry_rcu(c, &h->list, list) {
2925                 if (c->type == type && c->sent) {
2926                         BT_ERR("%s killing stalled connection %pMR",
2927                                hdev->name, &c->dst);
2928                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2929                 }
2930         }
2931 
2932         rcu_read_unlock();
2933 }
2934 
2935 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2936                                       int *quote)
2937 {
2938         struct hci_conn_hash *h = &hdev->conn_hash;
2939         struct hci_chan *chan = NULL;
2940         unsigned int num = 0, min = ~0, cur_prio = 0;
2941         struct hci_conn *conn;
2942         int cnt, q, conn_num = 0;
2943 
2944         BT_DBG("%s", hdev->name);
2945 
2946         rcu_read_lock();
2947 
2948         list_for_each_entry_rcu(conn, &h->list, list) {
2949                 struct hci_chan *tmp;
2950 
2951                 if (conn->type != type)
2952                         continue;
2953 
2954                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2955                         continue;
2956 
2957                 conn_num++;
2958 
2959                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2960                         struct sk_buff *skb;
2961 
2962                         if (skb_queue_empty(&tmp->data_q))
2963                                 continue;
2964 
2965                         skb = skb_peek(&tmp->data_q);
2966                         if (skb->priority < cur_prio)
2967                                 continue;
2968 
2969                         if (skb->priority > cur_prio) {
2970                                 num = 0;
2971                                 min = ~0;
2972                                 cur_prio = skb->priority;
2973                         }
2974 
2975                         num++;
2976 
2977                         if (conn->sent < min) {
2978                                 min  = conn->sent;
2979                                 chan = tmp;
2980                         }
2981                 }
2982 
2983                 if (hci_conn_num(hdev, type) == conn_num)
2984                         break;
2985         }
2986 
2987         rcu_read_unlock();
2988 
2989         if (!chan)
2990                 return NULL;
2991 
2992         switch (chan->conn->type) {
2993         case ACL_LINK:
2994                 cnt = hdev->acl_cnt;
2995                 break;
2996         case AMP_LINK:
2997                 cnt = hdev->block_cnt;
2998                 break;
2999         case SCO_LINK:
3000         case ESCO_LINK:
3001                 cnt = hdev->sco_cnt;
3002                 break;
3003         case LE_LINK:
3004                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3005                 break;
3006         default:
3007                 cnt = 0;
3008                 BT_ERR("Unknown link type");
3009         }
3010 
3011         q = cnt / num;
3012         *quote = q ? q : 1;
3013         BT_DBG("chan %p quote %d", chan, *quote);
3014         return chan;
3015 }
3016 
3017 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3018 {
3019         struct hci_conn_hash *h = &hdev->conn_hash;
3020         struct hci_conn *conn;
3021         int num = 0;
3022 
3023         BT_DBG("%s", hdev->name);
3024 
3025         rcu_read_lock();
3026 
3027         list_for_each_entry_rcu(conn, &h->list, list) {
3028                 struct hci_chan *chan;
3029 
3030                 if (conn->type != type)
3031                         continue;
3032 
3033                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3034                         continue;
3035 
3036                 num++;
3037 
3038                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3039                         struct sk_buff *skb;
3040 
3041                         if (chan->sent) {
3042                                 chan->sent = 0;
3043                                 continue;
3044                         }
3045 
3046                         if (skb_queue_empty(&chan->data_q))
3047                                 continue;
3048 
3049                         skb = skb_peek(&chan->data_q);
3050                         if (skb->priority >= HCI_PRIO_MAX - 1)
3051                                 continue;
3052 
3053                         skb->priority = HCI_PRIO_MAX - 1;
3054 
3055                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3056                                skb->priority);
3057                 }
3058 
3059                 if (hci_conn_num(hdev, type) == num)
3060                         break;
3061         }
3062 
3063         rcu_read_unlock();
3064 
3065 }
3066 
3067 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3068 {
3069         /* Calculate count of blocks used by this packet */
3070         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3071 }
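
/* Worked example: assuming a hypothetical hdev->block_len of 339 bytes,
 * a 1021-byte ACL skb carries 1021 - 4 = 1017 payload bytes and thus
 * occupies DIV_ROUND_UP(1017, 339) = 3 controller data blocks.
 */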
3072 
3073 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3074 {
3075         if (!test_bit(HCI_RAW, &hdev->flags)) {
3076                 /* ACL tx timeout must be longer than maximum
3077                  * link supervision timeout (40.9 seconds) */
3078                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3079                                        HCI_ACL_TX_TIMEOUT))
3080                         hci_link_tx_to(hdev, ACL_LINK);
3081         }
3082 }
3083 
3084 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3085 {
3086         unsigned int cnt = hdev->acl_cnt;
3087         struct hci_chan *chan;
3088         struct sk_buff *skb;
3089         int quote;
3090 
3091         __check_timeout(hdev, cnt);
3092 
3093         while (hdev->acl_cnt &&
3094                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3095                 u32 priority = (skb_peek(&chan->data_q))->priority;
3096                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3097                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3098                                skb->len, skb->priority);
3099 
3100                         /* Stop if priority has changed */
3101                         if (skb->priority < priority)
3102                                 break;
3103 
3104                         skb = skb_dequeue(&chan->data_q);
3105 
3106                         hci_conn_enter_active_mode(chan->conn,
3107                                                    bt_cb(skb)->force_active);
3108 
3109                         hci_send_frame(skb);
3110                         hdev->acl_last_tx = jiffies;
3111 
3112                         hdev->acl_cnt--;
3113                         chan->sent++;
3114                         chan->conn->sent++;
3115                 }
3116         }
3117 
3118         if (cnt != hdev->acl_cnt)
3119                 hci_prio_recalculate(hdev, ACL_LINK);
3120 }
3121 
3122 static void hci_sched_acl_blk(struct hci_dev *hdev)
3123 {
3124         unsigned int cnt = hdev->block_cnt;
3125         struct hci_chan *chan;
3126         struct sk_buff *skb;
3127         int quote;
3128         u8 type;
3129 
3130         __check_timeout(hdev, cnt);
3131 
3132         BT_DBG("%s", hdev->name);
3133 
3134         if (hdev->dev_type == HCI_AMP)
3135                 type = AMP_LINK;
3136         else
3137                 type = ACL_LINK;
3138 
3139         while (hdev->block_cnt > 0 &&
3140                (chan = hci_chan_sent(hdev, type, &quote))) {
3141                 u32 priority = (skb_peek(&chan->data_q))->priority;
3142                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3143                         int blocks;
3144 
3145                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3146                                skb->len, skb->priority);
3147 
3148                         /* Stop if priority has changed */
3149                         if (skb->priority < priority)
3150                                 break;
3151 
3152                         skb = skb_dequeue(&chan->data_q);
3153 
3154                         blocks = __get_blocks(hdev, skb);
3155                         if (blocks > hdev->block_cnt)
3156                                 return;
3157 
3158                         hci_conn_enter_active_mode(chan->conn,
3159                                                    bt_cb(skb)->force_active);
3160 
3161                         hci_send_frame(skb);
3162                         hdev->acl_last_tx = jiffies;
3163 
3164                         hdev->block_cnt -= blocks;
3165                         quote -= blocks;
3166 
3167                         chan->sent += blocks;
3168                         chan->conn->sent += blocks;
3169                 }
3170         }
3171 
3172         if (cnt != hdev->block_cnt)
3173                 hci_prio_recalculate(hdev, type);
3174 }
3175 
3176 static void hci_sched_acl(struct hci_dev *hdev)
3177 {
3178         BT_DBG("%s", hdev->name);
3179 
3180         /* No ACL links to schedule on a BR/EDR controller */
3181         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3182                 return;
3183 
3184         /* No AMP links to schedule on an AMP controller */
3185         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3186                 return;
3187 
3188         switch (hdev->flow_ctl_mode) {
3189         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3190                 hci_sched_acl_pkt(hdev);
3191                 break;
3192 
3193         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3194                 hci_sched_acl_blk(hdev);
3195                 break;
3196         }
3197 }
3198 
3199 /* Schedule SCO */
3200 static void hci_sched_sco(struct hci_dev *hdev)
3201 {
3202         struct hci_conn *conn;
3203         struct sk_buff *skb;
3204         int quote;
3205 
3206         BT_DBG("%s", hdev->name);
3207 
3208         if (!hci_conn_num(hdev, SCO_LINK))
3209                 return;
3210 
3211         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3212                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3213                         BT_DBG("skb %p len %d", skb, skb->len);
3214                         hci_send_frame(skb);
3215 
3216                         conn->sent++;
3217                         if (conn->sent == ~0)
3218                                 conn->sent = 0;
3219                 }
3220         }
3221 }
3222 
3223 static void hci_sched_esco(struct hci_dev *hdev)
3224 {
3225         struct hci_conn *conn;
3226         struct sk_buff *skb;
3227         int quote;
3228 
3229         BT_DBG("%s", hdev->name);
3230 
3231         if (!hci_conn_num(hdev, ESCO_LINK))
3232                 return;
3233 
3234         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3235                                                      &quote))) {
3236                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3237                         BT_DBG("skb %p len %d", skb, skb->len);
3238                         hci_send_frame(skb);
3239 
3240                         conn->sent++;
3241                         if (conn->sent == ~0)
3242                                 conn->sent = 0;
3243                 }
3244         }
3245 }
3246 
3247 static void hci_sched_le(struct hci_dev *hdev)
3248 {
3249         struct hci_chan *chan;
3250         struct sk_buff *skb;
3251         int quote, cnt, tmp;
3252 
3253         BT_DBG("%s", hdev->name);
3254 
3255         if (!hci_conn_num(hdev, LE_LINK))
3256                 return;
3257 
3258         if (!test_bit(HCI_RAW, &hdev->flags)) {
3259                 /* LE tx timeout must be longer than maximum
3260                  * link supervision timeout (40.9 seconds) */
3261                 if (!hdev->le_cnt && hdev->le_pkts &&
3262                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3263                         hci_link_tx_to(hdev, LE_LINK);
3264         }
3265 
3266         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3267         tmp = cnt;
3268         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3269                 u32 priority = (skb_peek(&chan->data_q))->priority;
3270                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3271                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3272                                skb->len, skb->priority);
3273 
3274                         /* Stop if priority has changed */
3275                         if (skb->priority < priority)
3276                                 break;
3277 
3278                         skb = skb_dequeue(&chan->data_q);
3279 
3280                         hci_send_frame(skb);
3281                         hdev->le_last_tx = jiffies;
3282 
3283                         cnt--;
3284                         chan->sent++;
3285                         chan->conn->sent++;
3286                 }
3287         }
3288 
3289         if (hdev->le_pkts)
3290                 hdev->le_cnt = cnt;
3291         else
3292                 hdev->acl_cnt = cnt;
3293 
3294         if (cnt != tmp)
3295                 hci_prio_recalculate(hdev, LE_LINK);
3296 }
3297 
3298 static void hci_tx_work(struct work_struct *work)
3299 {
3300         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3301         struct sk_buff *skb;
3302 
3303         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3304                hdev->sco_cnt, hdev->le_cnt);
3305 
3306         /* Schedule queues and send stuff to HCI driver */
3307 
3308         hci_sched_acl(hdev);
3309 
3310         hci_sched_sco(hdev);
3311 
3312         hci_sched_esco(hdev);
3313 
3314         hci_sched_le(hdev);
3315 
3316         /* Send next queued raw (unknown type) packet */
3317         while ((skb = skb_dequeue(&hdev->raw_q)))
3318                 hci_send_frame(skb);
3319 }
3320 
3321 /* ----- HCI RX task (incoming data processing) ----- */
3322 
3323 /* ACL data packet */
3324 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3325 {
3326         struct hci_acl_hdr *hdr = (void *) skb->data;
3327         struct hci_conn *conn;
3328         __u16 handle, flags;
3329 
3330         skb_pull(skb, HCI_ACL_HDR_SIZE);
3331 
3332         handle = __le16_to_cpu(hdr->handle);
3333         flags  = hci_flags(handle);
3334         handle = hci_handle(handle);
3335 
3336         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3337                handle, flags);
3338 
3339         hdev->stat.acl_rx++;
3340 
3341         hci_dev_lock(hdev);
3342         conn = hci_conn_hash_lookup_handle(hdev, handle);
3343         hci_dev_unlock(hdev);
3344 
3345         if (conn) {
3346                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3347 
3348                 /* Send to upper protocol */
3349                 l2cap_recv_acldata(conn, skb, flags);
3350                 return;
3351         } else {
3352                 BT_ERR("%s ACL packet for unknown connection handle %d",
3353                        hdev->name, handle);
3354         }
3355 
3356         kfree_skb(skb);
3357 }
3358 
3359 /* SCO data packet */
3360 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3361 {
3362         struct hci_sco_hdr *hdr = (void *) skb->data;
3363         struct hci_conn *conn;
3364         __u16 handle;
3365 
3366         skb_pull(skb, HCI_SCO_HDR_SIZE);
3367 
3368         handle = __le16_to_cpu(hdr->handle);
3369 
3370         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3371 
3372         hdev->stat.sco_rx++;
3373 
3374         hci_dev_lock(hdev);
3375         conn = hci_conn_hash_lookup_handle(hdev, handle);
3376         hci_dev_unlock(hdev);
3377 
3378         if (conn) {
3379                 /* Send to upper protocol */
3380                 sco_recv_scodata(conn, skb);
3381                 return;
3382         } else {
3383                 BT_ERR("%s SCO packet for unknown connection handle %d",
3384                        hdev->name, handle);
3385         }
3386 
3387         kfree_skb(skb);
3388 }
3389 
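     /* Commands belonging to one request sit back to back on
      * hdev->cmd_q and only the first of them has req.start set, so
      * the current request is complete once the head of the queue
      * starts a new one (or the queue is empty).
      */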
3390 static bool hci_req_is_complete(struct hci_dev *hdev)
3391 {
3392         struct sk_buff *skb;
3393 
3394         skb = skb_peek(&hdev->cmd_q);
3395         if (!skb)
3396                 return true;
3397 
3398         return bt_cb(skb)->req.start;
3399 }
3400 
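     /* Re-queue a clone of the last sent command at the head of
      * cmd_q.  Used when a controller quirk caused the command's
      * completion event to be lost; HCI_OP_RESET is deliberately
      * never resent, since the spontaneous reset is itself the
      * problem being worked around.
      */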
3401 static void hci_resend_last(struct hci_dev *hdev)
3402 {
3403         struct hci_command_hdr *sent;
3404         struct sk_buff *skb;
3405         u16 opcode;
3406 
3407         if (!hdev->sent_cmd)
3408                 return;
3409 
3410         sent = (void *) hdev->sent_cmd->data;
3411         opcode = __le16_to_cpu(sent->opcode);
3412         if (opcode == HCI_OP_RESET)
3413                 return;
3414 
3415         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3416         if (!skb)
3417                 return;
3418 
3419         skb_queue_head(&hdev->cmd_q, skb);
3420         queue_work(hdev->workqueue, &hdev->cmd_work);
3421 }
3422 
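     /* Called from event processing when a Command Complete or
      * Command Status event arrives.  A failed command completes the
      * whole request immediately; a successful one completes it only
      * when no further queued commands belong to the same request.
      */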
3423 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3424 {
3425         hci_req_complete_t req_complete = NULL;
3426         struct sk_buff *skb;
3427         unsigned long flags;
3428 
3429         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3430 
3431         /* If the completed command doesn't match the last one that was
3432          * sent, we need to do special handling of it.
3433          */
3434         if (!hci_sent_cmd_data(hdev, opcode)) {
3435                 /* Some CSR-based controllers generate a spontaneous
3436                  * reset complete event during init, and any pending
3437                  * command will then never be completed. In such a
3438                  * case we need to resend the last command that was
3439                  * sent.
3440                  */
3441                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3442                         hci_resend_last(hdev);
3443 
3444                 return;
3445         }
3446 
3447         /* If the command succeeded and there are still more commands in
3448          * this request, the request is not yet complete.
3449          */
3450         if (!status && !hci_req_is_complete(hdev))
3451                 return;
3452 
3453         /* If this was the last command in a request, the complete
3454          * callback will be found in hdev->sent_cmd instead of the
3455          * command queue (hdev->cmd_q).
3456          */
3457         if (hdev->sent_cmd) {
3458                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3459                 if (req_complete)
3460                         goto call_complete;
3461         }
3462 
3463         /* Remove all pending commands belonging to this request */
3464         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3465         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3466                 if (bt_cb(skb)->req.start) {
3467                         __skb_queue_head(&hdev->cmd_q, skb);
3468                         break;
3469                 }
3470 
3471                 req_complete = bt_cb(skb)->req.complete;
3472                 kfree_skb(skb);
3473         }
3474         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3475 
3476 call_complete:
3477         if (req_complete)
3478                 req_complete(hdev, status);
3479 }
3480 
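     /* RX work: drains hdev->rx_q.  Every frame is first copied to
      * the monitor socket, and promiscuous raw sockets get a copy as
      * well.  In HCI_RAW mode the kernel does no further processing;
      * during HCI_INIT data packets are dropped and only events are
      * handled.  Everything else is dispatched by packet type.
      */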
3481 static void hci_rx_work(struct work_struct *work)
3482 {
3483         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3484         struct sk_buff *skb;
3485 
3486         BT_DBG("%s", hdev->name);
3487 
3488         while ((skb = skb_dequeue(&hdev->rx_q))) {
3489                 /* Send copy to monitor */
3490                 hci_send_to_monitor(hdev, skb);
3491 
3492                 if (atomic_read(&hdev->promisc)) {
3493                         /* Send copy to the sockets */
3494                         hci_send_to_sock(hdev, skb);
3495                 }
3496 
3497                 if (test_bit(HCI_RAW, &hdev->flags)) {
3498                         kfree_skb(skb);
3499                         continue;
3500                 }
3501 
3502                 if (test_bit(HCI_INIT, &hdev->flags)) {
3503                         /* Don't process data packets in this state. */
3504                         switch (bt_cb(skb)->pkt_type) {
3505                         case HCI_ACLDATA_PKT:
3506                         case HCI_SCODATA_PKT:
3507                                 kfree_skb(skb);
3508                                 continue;
3509                         }
3510                 }
3511 
3512                 /* Process frame */
3513                 switch (bt_cb(skb)->pkt_type) {
3514                 case HCI_EVENT_PKT:
3515                         BT_DBG("%s Event packet", hdev->name);
3516                         hci_event_packet(hdev, skb);
3517                         break;
3518 
3519                 case HCI_ACLDATA_PKT:
3520                         BT_DBG("%s ACL data packet", hdev->name);
3521                         hci_acldata_packet(hdev, skb);
3522                         break;
3523 
3524                 case HCI_SCODATA_PKT:
3525                         BT_DBG("%s SCO data packet", hdev->name);
3526                         hci_scodata_packet(hdev, skb);
3527                         break;
3528 
3529                 default:
3530                         kfree_skb(skb);
3531                         break;
3532                 }
3533         }
3534 }
3535 
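     /* Command work: sends at most one command per run, and only when
      * the controller has a free command slot (hdev->cmd_cnt, refilled
      * from the Num_HCI_Command_Packets field of Command Complete and
      * Command Status events).  A clone is kept in hdev->sent_cmd for
      * completion matching, and cmd_timer fires if the controller
      * never responds.
      */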
3536 static void hci_cmd_work(struct work_struct *work)
3537 {
3538         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3539         struct sk_buff *skb;
3540 
3541         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3542                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3543 
3544         /* Send queued commands */
3545         if (atomic_read(&hdev->cmd_cnt)) {
3546                 skb = skb_dequeue(&hdev->cmd_q);
3547                 if (!skb)
3548                         return;
3549 
3550                 kfree_skb(hdev->sent_cmd);
3551 
3552                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3553                 if (hdev->sent_cmd) {
3554                         atomic_dec(&hdev->cmd_cnt);
3555                         hci_send_frame(skb);
3556                         if (test_bit(HCI_RESET, &hdev->flags))
3557                                 del_timer(&hdev->cmd_timer);
3558                         else
3559                                 mod_timer(&hdev->cmd_timer,
3560                                           jiffies + HCI_CMD_TIMEOUT);
3561                 } else {
3562                         skb_queue_head(&hdev->cmd_q, skb);
3563                         queue_work(hdev->workqueue, &hdev->cmd_work);
3564                 }
3565         }
3566 }
3567 
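     /* Start a classic inquiry using the General Inquiry Access Code
      * (GIAC, 0x9e8b33, stored little endian below); length is the
      * inquiry duration in units of 1.28 seconds.
      */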
3568 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3569 {
3570         /* General inquiry access code (GIAC) */
3571         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3572         struct hci_cp_inquiry cp;
3573 
3574         BT_DBG("%s", hdev->name);
3575 
3576         if (test_bit(HCI_INQUIRY, &hdev->flags))
3577                 return -EINPROGRESS;
3578 
3579         inquiry_cache_flush(hdev);
3580 
3581         memset(&cp, 0, sizeof(cp));
3582         memcpy(&cp.lap, lap, sizeof(cp.lap));
3583         cp.length  = length;
3584 
3585         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3586 }
3587 
3588 int hci_cancel_inquiry(struct hci_dev *hdev)
3589 {
3590         BT_DBG("%s", hdev->name);
3591 
3592         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3593                 return -EALREADY;
3594 
3595         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3596 }
3597 
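     /* Map the user-facing BDADDR_LE_* address types onto the
      * ADDR_LE_DEV_* constants used internally by the LE code;
      * anything that is not explicitly public is treated as a
      * random address.
      */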
3598 u8 bdaddr_to_le(u8 bdaddr_type)
3599 {
3600         switch (bdaddr_type) {
3601         case BDADDR_LE_PUBLIC:
3602                 return ADDR_LE_DEV_PUBLIC;
3603 
3604         default:
3605                 /* Fall back to the LE Random address type */
3606                 return ADDR_LE_DEV_RANDOM;
3607         }
3608 }
3609 
