~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

Version: ~ [ linux-5.5-rc2 ] ~ [ linux-5.4.3 ] ~ [ linux-5.3.16 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.89 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.158 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.206 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.206 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.79 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2    BlueZ - Bluetooth protocol stack for Linux
  3    Copyright (C) 2000-2001 Qualcomm Incorporated
  4    Copyright (C) 2011 ProFUSION Embedded Systems
  5 
  6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
  7 
  8    This program is free software; you can redistribute it and/or modify
  9    it under the terms of the GNU General Public License version 2 as
 10    published by the Free Software Foundation;
 11 
 12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
 16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
 17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 20 
 21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
 22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
 23    SOFTWARE IS DISCLAIMED.
 24 */
 25 
 26 /* Bluetooth HCI core. */
 27 
 28 #include <linux/export.h>
 29 #include <linux/idr.h>
 30 
 31 #include <linux/rfkill.h>
 32 
 33 #include <net/bluetooth/bluetooth.h>
 34 #include <net/bluetooth/hci_core.h>
 35 
/* Work handlers; defined later in this file and wired up at
 * device-registration time.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list; protected by hci_dev_list_lock. */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list; protected by hci_cb_list_lock. */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering: allocator for the hdev->id index space. */
static DEFINE_IDA(hci_index_ida);
 51 /* ---- HCI notifications ---- */
 52 
/* Forward a device event to listening HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}
 57 
 58 /* ---- HCI requests ---- */
 59 
 60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
 61 {
 62         BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
 63 
 64         /* If this is the init phase check if the completed command matches
 65          * the last init command, and if not just return.
 66          */
 67         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
 68                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
 69                 u16 opcode = __le16_to_cpu(sent->opcode);
 70                 struct sk_buff *skb;
 71 
 72                 /* Some CSR based controllers generate a spontaneous
 73                  * reset complete event during init and any pending
 74                  * command will never be completed. In such a case we
 75                  * need to resend whatever was the last sent
 76                  * command.
 77                  */
 78 
 79                 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
 80                         return;
 81 
 82                 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
 83                 if (skb) {
 84                         skb_queue_head(&hdev->cmd_q, skb);
 85                         queue_work(hdev->workqueue, &hdev->cmd_work);
 86                 }
 87 
 88                 return;
 89         }
 90 
 91         if (hdev->req_status == HCI_REQ_PEND) {
 92                 hdev->req_result = result;
 93                 hdev->req_status = HCI_REQ_DONE;
 94                 wake_up_interruptible(&hdev->req_wait_q);
 95         }
 96 }
 97 
 98 static void hci_req_cancel(struct hci_dev *hdev, int err)
 99 {
100         BT_DBG("%s err 0x%2.2x", hdev->name, err);
101 
102         if (hdev->req_status == HCI_REQ_PEND) {
103                 hdev->req_result = err;
104                 hdev->req_status = HCI_REQ_CANCELED;
105                 wake_up_interruptible(&hdev->req_wait_q);
106         }
107 }
108 
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands) and sleeps up to
 * @timeout jiffies until hci_req_complete()/hci_req_cancel() signals
 * the result. Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno on failure, -ETIMEDOUT if the
 * controller never answered, or -EINTR on a signal.
 */
static int __hci_request(struct hci_dev *hdev,
                         void (*req)(struct hci_dev *hdev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        /* Register on the wait queue and change task state *before*
         * issuing the request so an immediate completion cannot be
         * missed.
         */
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        /* NOTE(review): on a signal we return with req_status left at
         * HCI_REQ_PEND, so a late completion may still flip it — confirm
         * callers tolerate this before relying on req_status afterwards.
         */
        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                /* Controller replied; map HCI status to an errno. */
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                /* hci_req_cancel() stored a positive errno in req_result. */
                err = -hdev->req_result;
                break;

        default:
                /* Neither completed nor canceled within @timeout. */
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
152 
153 static int hci_request(struct hci_dev *hdev,
154                        void (*req)(struct hci_dev *hdev, unsigned long opt),
155                        unsigned long opt, __u32 timeout)
156 {
157         int ret;
158 
159         if (!test_bit(HCI_UP, &hdev->flags))
160                 return -ENETDOWN;
161 
162         /* Serialize all requests */
163         hci_req_lock(hdev);
164         ret = __hci_request(hdev, req, opt, timeout);
165         hci_req_unlock(hdev);
166 
167         return ret;
168 }
169 
/* Request helper: mark the device as resetting and issue HCI Reset. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178 
/* Queue the init-time commands specific to BR/EDR controllers. */
static void bredr_init(struct hci_dev *hdev)
{
        /* BR/EDR controllers do packet-based flow control. */
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
189 
/* Queue the init-time commands specific to AMP controllers. */
static void amp_init(struct hci_dev *hdev)
{
        /* AMP controllers do block-based flow control. */
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
203 
/* Request helper run during device bring-up (HCI_INIT set): flush any
 * driver-supplied init commands to the command queue, optionally reset
 * the controller, then issue the device-type-specific init commands.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Reset — skipped when the quirk says the controller is reset
         * on close instead.
         */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(hdev, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}
240 
241 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242 {
243         __u8 scan = opt;
244 
245         BT_DBG("%s %x", hdev->name, scan);
246 
247         /* Inquiry and Page scans */
248         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
249 }
250 
251 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252 {
253         __u8 auth = opt;
254 
255         BT_DBG("%s %x", hdev->name, auth);
256 
257         /* Authentication */
258         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
259 }
260 
261 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262 {
263         __u8 encrypt = opt;
264 
265         BT_DBG("%s %x", hdev->name, encrypt);
266 
267         /* Encryption */
268         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
269 }
270 
271 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272 {
273         __le16 policy = cpu_to_le16(opt);
274 
275         BT_DBG("%s %x", hdev->name, policy);
276 
277         /* Default link policy */
278         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279 }
280 
281 /* Get HCI device by index.
282  * Device is held on return. */
283 struct hci_dev *hci_dev_get(int index)
284 {
285         struct hci_dev *hdev = NULL, *d;
286 
287         BT_DBG("%d", index);
288 
289         if (index < 0)
290                 return NULL;
291 
292         read_lock(&hci_dev_list_lock);
293         list_for_each_entry(d, &hci_dev_list, list) {
294                 if (d->id == index) {
295                         hdev = hci_dev_hold(d);
296                         break;
297                 }
298         }
299         read_unlock(&hci_dev_list_lock);
300         return hdev;
301 }
302 
303 /* ---- Inquiry support ---- */
304 
305 bool hci_discovery_active(struct hci_dev *hdev)
306 {
307         struct discovery_state *discov = &hdev->discovery;
308 
309         switch (discov->state) {
310         case DISCOVERY_FINDING:
311         case DISCOVERY_RESOLVING:
312                 return true;
313 
314         default:
315                 return false;
316         }
317 }
318 
319 void hci_discovery_set_state(struct hci_dev *hdev, int state)
320 {
321         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322 
323         if (hdev->discovery.state == state)
324                 return;
325 
326         switch (state) {
327         case DISCOVERY_STOPPED:
328                 if (hdev->discovery.state != DISCOVERY_STARTING)
329                         mgmt_discovering(hdev, 0);
330                 break;
331         case DISCOVERY_STARTING:
332                 break;
333         case DISCOVERY_FINDING:
334                 mgmt_discovering(hdev, 1);
335                 break;
336         case DISCOVERY_RESOLVING:
337                 break;
338         case DISCOVERY_STOPPING:
339                 break;
340         }
341 
342         hdev->discovery.state = state;
343 }
344 
345 static void inquiry_cache_flush(struct hci_dev *hdev)
346 {
347         struct discovery_state *cache = &hdev->discovery;
348         struct inquiry_entry *p, *n;
349 
350         list_for_each_entry_safe(p, n, &cache->all, all) {
351                 list_del(&p->all);
352                 kfree(p);
353         }
354 
355         INIT_LIST_HEAD(&cache->unknown);
356         INIT_LIST_HEAD(&cache->resolve);
357 }
358 
359 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360                                                bdaddr_t *bdaddr)
361 {
362         struct discovery_state *cache = &hdev->discovery;
363         struct inquiry_entry *e;
364 
365         BT_DBG("cache %p, %pMR", cache, bdaddr);
366 
367         list_for_each_entry(e, &cache->all, all) {
368                 if (!bacmp(&e->data.bdaddr, bdaddr))
369                         return e;
370         }
371 
372         return NULL;
373 }
374 
375 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
376                                                        bdaddr_t *bdaddr)
377 {
378         struct discovery_state *cache = &hdev->discovery;
379         struct inquiry_entry *e;
380 
381         BT_DBG("cache %p, %pMR", cache, bdaddr);
382 
383         list_for_each_entry(e, &cache->unknown, list) {
384                 if (!bacmp(&e->data.bdaddr, bdaddr))
385                         return e;
386         }
387 
388         return NULL;
389 }
390 
391 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
392                                                        bdaddr_t *bdaddr,
393                                                        int state)
394 {
395         struct discovery_state *cache = &hdev->discovery;
396         struct inquiry_entry *e;
397 
398         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
399 
400         list_for_each_entry(e, &cache->resolve, list) {
401                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402                         return e;
403                 if (!bacmp(&e->data.bdaddr, bdaddr))
404                         return e;
405         }
406 
407         return NULL;
408 }
409 
/* Re-insert @ie into the resolve list at its sorted position: entries
 * with a pending name request stay in front, the rest are ordered by
 * |RSSI| ascending (i.e. stronger signal first, assuming negative dBm
 * values — TODO confirm RSSI sign convention).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        /* Unlink from whichever secondary list the entry is on now. */
        list_del(&ie->list);

        /* Advance 'pos' past every NAME_PENDING entry and every entry
         * with smaller |RSSI| than @ie; insert right after the last one.
         */
        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
428 
/* Add or refresh the cache entry for an inquiry result.
 *
 * @data:       the inquiry response data to store
 * @name_known: whether the remote name is already known to the caller
 * @ssp:        out parameter; set true when either the new data or an
 *              existing entry indicates SSP support (may be NULL)
 *
 * Returns true when no further name resolution is needed for this
 * entry, false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        /* A fresh inquiry result invalidates stored OOB data. */
        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                /* SSP support previously seen sticks even if the new
                 * result doesn't carry it. */
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                /* Changed RSSI re-sorts the entry in the resolve list. */
                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        /* Name became known: drop the entry from its secondary
         * (unknown/resolve) list. */
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
486 
487 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
488 {
489         struct discovery_state *cache = &hdev->discovery;
490         struct inquiry_info *info = (struct inquiry_info *) buf;
491         struct inquiry_entry *e;
492         int copied = 0;
493 
494         list_for_each_entry(e, &cache->all, all) {
495                 struct inquiry_data *data = &e->data;
496 
497                 if (copied >= num)
498                         break;
499 
500                 bacpy(&info->bdaddr, &data->bdaddr);
501                 info->pscan_rep_mode    = data->pscan_rep_mode;
502                 info->pscan_period_mode = data->pscan_period_mode;
503                 info->pscan_mode        = data->pscan_mode;
504                 memcpy(info->dev_class, data->dev_class, 3);
505                 info->clock_offset      = data->clock_offset;
506 
507                 info++;
508                 copied++;
509         }
510 
511         BT_DBG("cache %p, copied %d", cache, copied);
512         return copied;
513 }
514 
515 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
516 {
517         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
518         struct hci_cp_inquiry cp;
519 
520         BT_DBG("%s", hdev->name);
521 
522         if (test_bit(HCI_INQUIRY, &hdev->flags))
523                 return;
524 
525         /* Start Inquiry */
526         memcpy(&cp.lap, &ir->lap, 3);
527         cp.length  = ir->length;
528         cp.num_rsp = ir->num_rsp;
529         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
530 }
531 
/* HCIINQUIRY ioctl handler: run (or reuse a fresh cache of) an inquiry
 * and copy the results back to userspace.
 *
 * @arg: userspace pointer to a struct hci_inquiry_req immediately
 *       followed by the result buffer.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Only run a new inquiry when the cache is stale, empty, or the
         * caller explicitly asked for a flush. */
        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        /* ~2 seconds of wait per requested inquiry-length unit. */
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        /* Write back the (updated) request header, then the results. */
        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
598 
/* Build LE advertising data into @ptr: an optional flags field, an
 * optional TX power field, and the (possibly shortened) local name.
 *
 * @ptr: output buffer, assumed to hold at least HCI_MAX_AD_LENGTH
 *       bytes — TODO confirm against callers.
 *
 * Returns the number of bytes written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        /* Each AD structure is: length byte, type byte, payload. */
        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                /* Remaining room minus the 2-byte length/type header. */
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                /* Truncated names get the "shortened name" AD type. */
                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* Length byte covers the type byte plus the name. */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
656 
/* Rebuild the LE advertising data and push it to the controller if it
 * changed. Returns 0 when unchanged or queued, a negative errno on
 * failure (-EINVAL when the controller is not LE capable).
 */
int hci_update_ad(struct hci_dev *hdev)
{
        struct hci_cp_le_set_adv_data cp;
        u8 len;
        int err;

        hci_dev_lock(hdev);

        if (!lmp_le_capable(hdev)) {
                err = -EINVAL;
                goto unlock;
        }

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        /* Skip the command when the data is identical to what the
         * controller already has. */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0) {
                err = 0;
                goto unlock;
        }

        /* Cache the full (zero-padded) buffer for the comparison above. */
        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;
        err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
        hci_dev_unlock(hdev);

        return err;
}
691 
692 /* ---- HCI ioctl helpers ---- */
693 
/* Bring an HCI device up: open the transport, run the HCI init
 * sequence (unless the device is raw), and announce the device as UP.
 * On init failure everything is torn back down.
 *
 * @dev: device index.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Refuse devices that are mid-unregistration. */
        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Refuse while the radio is rfkill-blocked. */
        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        /* Driver transport open. */
        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        /* Raw devices skip the HCI init command sequence entirely. */
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                /* Extra reference held while the device stays UP. */
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                hci_update_ad(hdev);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
782 
/* Take an HCI device down: cancel pending work, flush all queues,
 * optionally reset the controller, and close the transport. Safe to
 * call on a device that is already down (returns 0 after stopping the
 * command timer).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        /* Abort any synchronous request still in flight before taking
         * the request lock ourselves. */
        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd  work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        /* Drops the reference taken in hci_dev_open(). */
        hci_dev_put(hdev);
        return 0;
}
875 
876 int hci_dev_close(__u16 dev)
877 {
878         struct hci_dev *hdev;
879         int err;
880 
881         hdev = hci_dev_get(dev);
882         if (!hdev)
883                 return -ENODEV;
884 
885         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
886                 cancel_delayed_work(&hdev->power_off);
887 
888         err = hci_dev_do_close(hdev);
889 
890         hci_dev_put(hdev);
891         return err;
892 }
893 
/* HCIDEVRESET ioctl handler: flush queues and connection state and, for
 * non-raw devices, send an HCI Reset. A no-op returning 0 when the
 * device is not up.
 */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Flush cached inquiry results and tear down connections. */
        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset the flow-control credit counters. */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
931 
932 int hci_dev_reset_stat(__u16 dev)
933 {
934         struct hci_dev *hdev;
935         int ret = 0;
936 
937         hdev = hci_dev_get(dev);
938         if (!hdev)
939                 return -ENODEV;
940 
941         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
942 
943         hci_dev_put(hdev);
944 
945         return ret;
946 }
947 
/* Handler for the HCISET* family of ioctls: each either runs a
 * synchronous HCI request or directly updates a field on the device.
 *
 * @cmd: ioctl number
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                          HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        /* NOTE(review): the MTU ioctls split dev_opt into two 16-bit
         * halves via pointer arithmetic, which reads low half = pkts,
         * high half = mtu only on little-endian — presumably a legacy
         * ABI quirk; confirm before touching.
         */
        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
1022 
/* Copy the list of registered HCI devices (id + flags pairs) to user
 * space for the HCIGETDEVLIST ioctl.
 *
 * @arg: user pointer to a struct hci_dev_list_req; dev_num on input is
 *       the capacity of the user buffer, on output the entries filled.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation below to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device cancels its pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		/* Silently truncate if more devices exist than requested */
		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy out the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1069 
/* Fill in a struct hci_dev_info for the HCIGETDEVINFO ioctl.
 *
 * For controllers without BR/EDR support the ACL fields carry the LE
 * buffer parameters and the SCO fields are reported as zero.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device cancels its pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* NOTE(review): plain strcpy assumes the "hci%d" name always fits
	 * in di.name -- confirm both buffers have the same size. */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: BR/EDR vs AMP type */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: report LE buffer info in the ACL slots */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1118 
1119 /* ---- Interface to HCI drivers ---- */
1120 
1121 static int hci_rfkill_set_block(void *data, bool blocked)
1122 {
1123         struct hci_dev *hdev = data;
1124 
1125         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1126 
1127         if (!blocked)
1128                 return 0;
1129 
1130         hci_dev_do_close(hdev);
1131 
1132         return 0;
1133 }
1134 
/* rfkill callbacks: hci_rfkill_set_block closes the adapter on block;
 * the unblock transition is ignored there. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1138 
/* Work handler that powers the adapter on (queued at registration time
 * and on mgmt power requests).  On open failure the error is reported
 * to the management interface.  If the device was only powered up
 * automatically, a delayed power-off is scheduled; once initial setup
 * completes, the new controller index is announced to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	/* Announce the index only after leaving the setup phase */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1159 
1160 static void hci_power_off(struct work_struct *work)
1161 {
1162         struct hci_dev *hdev = container_of(work, struct hci_dev,
1163                                             power_off.work);
1164 
1165         BT_DBG("%s", hdev->name);
1166 
1167         hci_dev_do_close(hdev);
1168 }
1169 
1170 static void hci_discov_off(struct work_struct *work)
1171 {
1172         struct hci_dev *hdev;
1173         u8 scan = SCAN_PAGE;
1174 
1175         hdev = container_of(work, struct hci_dev, discov_off.work);
1176 
1177         BT_DBG("%s", hdev->name);
1178 
1179         hci_dev_lock(hdev);
1180 
1181         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1182 
1183         hdev->discov_timeout = 0;
1184 
1185         hci_dev_unlock(hdev);
1186 }
1187 
1188 int hci_uuids_clear(struct hci_dev *hdev)
1189 {
1190         struct bt_uuid *uuid, *tmp;
1191 
1192         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1193                 list_del(&uuid->list);
1194                 kfree(uuid);
1195         }
1196 
1197         return 0;
1198 }
1199 
1200 int hci_link_keys_clear(struct hci_dev *hdev)
1201 {
1202         struct list_head *p, *n;
1203 
1204         list_for_each_safe(p, n, &hdev->link_keys) {
1205                 struct link_key *key;
1206 
1207                 key = list_entry(p, struct link_key, list);
1208 
1209                 list_del(p);
1210                 kfree(key);
1211         }
1212 
1213         return 0;
1214 }
1215 
1216 int hci_smp_ltks_clear(struct hci_dev *hdev)
1217 {
1218         struct smp_ltk *k, *tmp;
1219 
1220         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1221                 list_del(&k->list);
1222                 kfree(k);
1223         }
1224 
1225         return 0;
1226 }
1227 
1228 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1229 {
1230         struct link_key *k;
1231 
1232         list_for_each_entry(k, &hdev->link_keys, list)
1233                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1234                         return k;
1235 
1236         return NULL;
1237 }
1238 
1239 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1240                                u8 key_type, u8 old_key_type)
1241 {
1242         /* Legacy key */
1243         if (key_type < 0x03)
1244                 return true;
1245 
1246         /* Debug keys are insecure so don't store them persistently */
1247         if (key_type == HCI_LK_DEBUG_COMBINATION)
1248                 return false;
1249 
1250         /* Changed combination key and there's no previous one */
1251         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1252                 return false;
1253 
1254         /* Security mode 3 case */
1255         if (!conn)
1256                 return true;
1257 
1258         /* Neither local nor remote side had no-bonding as requirement */
1259         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1260                 return true;
1261 
1262         /* Local side had dedicated bonding as requirement */
1263         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1264                 return true;
1265 
1266         /* Remote side had dedicated bonding as requirement */
1267         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1268                 return true;
1269 
1270         /* If none of the above criteria match, then don't store the key
1271          * persistently */
1272         return false;
1273 }
1274 
1275 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1276 {
1277         struct smp_ltk *k;
1278 
1279         list_for_each_entry(k, &hdev->long_term_keys, list) {
1280                 if (k->ediv != ediv ||
1281                     memcmp(rand, k->rand, sizeof(k->rand)))
1282                         continue;
1283 
1284                 return k;
1285         }
1286 
1287         return NULL;
1288 }
1289 
1290 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1291                                      u8 addr_type)
1292 {
1293         struct smp_ltk *k;
1294 
1295         list_for_each_entry(k, &hdev->long_term_keys, list)
1296                 if (addr_type == k->bdaddr_type &&
1297                     bacmp(bdaddr, &k->bdaddr) == 0)
1298                         return k;
1299 
1300         return NULL;
1301 }
1302 
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * @conn:    connection the key was created on, or NULL for security
 *           mode 3 setups
 * @new_key: non-zero when the controller reported a fresh key, in which
 *           case the management interface is notified
 *
 * A key type of 0xff is used internally to mean "no previous key".
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Reuse the existing entry, remembering its previous type */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the type of the key it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1355 
1356 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1357                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1358                 ediv, u8 rand[8])
1359 {
1360         struct smp_ltk *key, *old_key;
1361 
1362         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1363                 return 0;
1364 
1365         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1366         if (old_key)
1367                 key = old_key;
1368         else {
1369                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1370                 if (!key)
1371                         return -ENOMEM;
1372                 list_add(&key->list, &hdev->long_term_keys);
1373         }
1374 
1375         bacpy(&key->bdaddr, bdaddr);
1376         key->bdaddr_type = addr_type;
1377         memcpy(key->val, tk, sizeof(key->val));
1378         key->authenticated = authenticated;
1379         key->ediv = ediv;
1380         key->enc_size = enc_size;
1381         key->type = type;
1382         memcpy(key->rand, rand, sizeof(key->rand));
1383 
1384         if (!new_key)
1385                 return 0;
1386 
1387         if (type & HCI_SMP_LTK)
1388                 mgmt_new_ltk(hdev, key, 1);
1389 
1390         return 0;
1391 }
1392 
1393 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394 {
1395         struct link_key *key;
1396 
1397         key = hci_find_link_key(hdev, bdaddr);
1398         if (!key)
1399                 return -ENOENT;
1400 
1401         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402 
1403         list_del(&key->list);
1404         kfree(key);
1405 
1406         return 0;
1407 }
1408 
1409 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1410 {
1411         struct smp_ltk *k, *tmp;
1412 
1413         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1414                 if (bacmp(bdaddr, &k->bdaddr))
1415                         continue;
1416 
1417                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1418 
1419                 list_del(&k->list);
1420                 kfree(k);
1421         }
1422 
1423         return 0;
1424 }
1425 
1426 /* HCI command timer function */
1427 static void hci_cmd_timeout(unsigned long arg)
1428 {
1429         struct hci_dev *hdev = (void *) arg;
1430 
1431         if (hdev->sent_cmd) {
1432                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1433                 u16 opcode = __le16_to_cpu(sent->opcode);
1434 
1435                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1436         } else {
1437                 BT_ERR("%s command tx timeout", hdev->name);
1438         }
1439 
1440         atomic_set(&hdev->cmd_cnt, 1);
1441         queue_work(hdev->workqueue, &hdev->cmd_work);
1442 }
1443 
1444 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1445                                           bdaddr_t *bdaddr)
1446 {
1447         struct oob_data *data;
1448 
1449         list_for_each_entry(data, &hdev->remote_oob_data, list)
1450                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1451                         return data;
1452 
1453         return NULL;
1454 }
1455 
1456 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1457 {
1458         struct oob_data *data;
1459 
1460         data = hci_find_remote_oob_data(hdev, bdaddr);
1461         if (!data)
1462                 return -ENOENT;
1463 
1464         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1465 
1466         list_del(&data->list);
1467         kfree(data);
1468 
1469         return 0;
1470 }
1471 
1472 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1473 {
1474         struct oob_data *data, *n;
1475 
1476         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1477                 list_del(&data->list);
1478                 kfree(data);
1479         }
1480 
1481         return 0;
1482 }
1483 
1484 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1485                             u8 *randomizer)
1486 {
1487         struct oob_data *data;
1488 
1489         data = hci_find_remote_oob_data(hdev, bdaddr);
1490 
1491         if (!data) {
1492                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1493                 if (!data)
1494                         return -ENOMEM;
1495 
1496                 bacpy(&data->bdaddr, bdaddr);
1497                 list_add(&data->list, &hdev->remote_oob_data);
1498         }
1499 
1500         memcpy(data->hash, hash, sizeof(data->hash));
1501         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1502 
1503         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1504 
1505         return 0;
1506 }
1507 
1508 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1509 {
1510         struct bdaddr_list *b;
1511 
1512         list_for_each_entry(b, &hdev->blacklist, list)
1513                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1514                         return b;
1515 
1516         return NULL;
1517 }
1518 
1519 int hci_blacklist_clear(struct hci_dev *hdev)
1520 {
1521         struct list_head *p, *n;
1522 
1523         list_for_each_safe(p, n, &hdev->blacklist) {
1524                 struct bdaddr_list *b;
1525 
1526                 b = list_entry(p, struct bdaddr_list, list);
1527 
1528                 list_del(p);
1529                 kfree(b);
1530         }
1531 
1532         return 0;
1533 }
1534 
1535 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1536 {
1537         struct bdaddr_list *entry;
1538 
1539         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1540                 return -EBADF;
1541 
1542         if (hci_blacklist_lookup(hdev, bdaddr))
1543                 return -EEXIST;
1544 
1545         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1546         if (!entry)
1547                 return -ENOMEM;
1548 
1549         bacpy(&entry->bdaddr, bdaddr);
1550 
1551         list_add(&entry->list, &hdev->blacklist);
1552 
1553         return mgmt_device_blocked(hdev, bdaddr, type);
1554 }
1555 
1556 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1557 {
1558         struct bdaddr_list *entry;
1559 
1560         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1561                 return hci_blacklist_clear(hdev);
1562 
1563         entry = hci_blacklist_lookup(hdev, bdaddr);
1564         if (!entry)
1565                 return -ENOENT;
1566 
1567         list_del(&entry->list);
1568         kfree(entry);
1569 
1570         return mgmt_device_unblocked(hdev, bdaddr, type);
1571 }
1572 
1573 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1574 {
1575         struct le_scan_params *param =  (struct le_scan_params *) opt;
1576         struct hci_cp_le_set_scan_param cp;
1577 
1578         memset(&cp, 0, sizeof(cp));
1579         cp.type = param->type;
1580         cp.interval = cpu_to_le16(param->interval);
1581         cp.window = cpu_to_le16(param->window);
1582 
1583         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1584 }
1585 
1586 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1587 {
1588         struct hci_cp_le_set_scan_enable cp;
1589 
1590         memset(&cp, 0, sizeof(cp));
1591         cp.enable = 1;
1592         cp.filter_dup = 1;
1593 
1594         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1595 }
1596 
/* Synchronously configure and enable an LE scan, then schedule the
 * delayed work that disables it again after @timeout ms.
 *
 * Both HCI requests run under the request lock with a fixed 3 s
 * per-request timeout.  Returns -EINPROGRESS if a scan is already
 * active, or a negative errno from the HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning can be enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Auto-disable the scan once the requested duration has passed */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1630 
1631 int hci_cancel_le_scan(struct hci_dev *hdev)
1632 {
1633         BT_DBG("%s", hdev->name);
1634 
1635         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1636                 return -EALREADY;
1637 
1638         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1639                 struct hci_cp_le_set_scan_enable cp;
1640 
1641                 /* Send HCI command to disable LE Scan */
1642                 memset(&cp, 0, sizeof(cp));
1643                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1644         }
1645 
1646         return 0;
1647 }
1648 
1649 static void le_scan_disable_work(struct work_struct *work)
1650 {
1651         struct hci_dev *hdev = container_of(work, struct hci_dev,
1652                                             le_scan_disable.work);
1653         struct hci_cp_le_set_scan_enable cp;
1654 
1655         BT_DBG("%s", hdev->name);
1656 
1657         memset(&cp, 0, sizeof(cp));
1658 
1659         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1660 }
1661 
1662 static void le_scan_work(struct work_struct *work)
1663 {
1664         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1665         struct le_scan_params *param = &hdev->le_scan_params;
1666 
1667         BT_DBG("%s", hdev->name);
1668 
1669         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1670                        param->timeout);
1671 }
1672 
1673 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1674                 int timeout)
1675 {
1676         struct le_scan_params *param = &hdev->le_scan_params;
1677 
1678         BT_DBG("%s", hdev->name);
1679 
1680         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1681                 return -ENOTSUPP;
1682 
1683         if (work_busy(&hdev->le_scan))
1684                 return -EINPROGRESS;
1685 
1686         param->type = type;
1687         param->interval = interval;
1688         param->window = window;
1689         param->timeout = timeout;
1690 
1691         queue_work(system_long_wq, &hdev->le_scan);
1692 
1693         return 0;
1694 }
1695 
/* Alloc HCI device */
/* Allocate and initialise a new struct hci_dev with defaults, lists,
 * work items, queues and the command timer set up.  The caller
 * registers it with hci_register_dev() and releases it with
 * hci_free_dev().  Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative defaults; drivers and controller setup adjust these */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Fires when the controller fails to answer a command in time */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1751 
/* Free HCI device */
/* Release the driver's reference on an hdev obtained from
 * hci_alloc_dev().  The struct itself is freed from the device release
 * callback once the last reference is dropped.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1761 
/* Register HCI device */
/* Register @hdev with the HCI core: assign an index, add it to the
 * global device list, create its workqueues and sysfs/rfkill entries,
 * and queue the initial power-on.
 *
 * Returns the new index (>= 0) on success or a negative errno.  The
 * driver must have provided open and close callbacks.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failures are not fatal; the device just lacks a switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Bring the controller up asynchronously */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

	/* Error unwinding: both workqueues exist when err_wqueue is reached */
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1850 
/* Unregister HCI device */
/* Tear down a registered @hdev: close it, notify mgmt, remove rfkill
 * and sysfs entries, destroy its workqueues, purge all stored keys and
 * lists, then drop the core's reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Cache the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Devices still in setup were never announced, so there is no
	 * index to remove for them */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1909 
/* Suspend HCI device */
/* Notify registered listeners of a suspend; no controller state is
 * touched here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1917 
/* Resume HCI device */
/* Notify registered listeners of a resume; no controller state is
 * touched here.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1925 
1926 /* Receive frame from HCI drivers */
1927 int hci_recv_frame(struct sk_buff *skb)
1928 {
1929         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1930         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1931                       && !test_bit(HCI_INIT, &hdev->flags))) {
1932                 kfree_skb(skb);
1933                 return -ENXIO;
1934         }
1935 
1936         /* Incoming skb */
1937         bt_cb(skb)->incoming = 1;
1938 
1939         /* Time stamp */
1940         __net_timestamp(skb);
1941 
1942         skb_queue_tail(&hdev->rx_q, skb);
1943         queue_work(hdev->workqueue, &hdev->rx_work);
1944 
1945         return 0;
1946 }
1947 EXPORT_SYMBOL(hci_recv_frame);
1948 
/* Reassemble a (possibly fragmented) HCI packet of the given type into
 * the hdev->reassembly[index] slot.
 *
 * Returns the number of input bytes left unconsumed (>= 0) or a negative
 * error code.  When a complete packet has been assembled it is handed to
 * hci_recv_frame() and the slot is cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        /* Only ACL, SCO and event packets can be reassembled */
        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                /* No packet in progress: allocate a buffer large enough for
                 * the biggest packet of this type and start by expecting
                 * just the header. */
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* scb->expect tracks how many more bytes complete the
                 * current stage (header first, then payload) */
                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                /* Copy in as much as is both expected and available */
                scb = (void *) skb->cb;
                len = min_t(uint, scb->expect, count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                /* Once the header is complete, learn the payload length
                 * from it and bail out if the payload cannot fit. */
                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len  == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
2057 
2058 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2059 {
2060         int rem = 0;
2061 
2062         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2063                 return -EILSEQ;
2064 
2065         while (count) {
2066                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2067                 if (rem < 0)
2068                         return rem;
2069 
2070                 data += (count - rem);
2071                 count = rem;
2072         }
2073 
2074         return rem;
2075 }
2076 EXPORT_SYMBOL(hci_recv_fragment);
2077 
2078 #define STREAM_REASSEMBLY 0
2079 
2080 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2081 {
2082         int type;
2083         int rem = 0;
2084 
2085         while (count) {
2086                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2087 
2088                 if (!skb) {
2089                         struct { char type; } *pkt;
2090 
2091                         /* Start of the frame */
2092                         pkt = data;
2093                         type = pkt->type;
2094 
2095                         data++;
2096                         count--;
2097                 } else
2098                         type = bt_cb(skb)->pkt_type;
2099 
2100                 rem = hci_reassembly(hdev, type, data, count,
2101                                      STREAM_REASSEMBLY);
2102                 if (rem < 0)
2103                         return rem;
2104 
2105                 data += (count - rem);
2106                 count = rem;
2107         }
2108 
2109         return rem;
2110 }
2111 EXPORT_SYMBOL(hci_recv_stream_fragment);
2112 
2113 /* ---- Interface to upper protocols ---- */
2114 
/* Register an upper-protocol callback on the global hci_cb_list */
int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2126 
/* Remove a previously registered upper-protocol callback */
int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2138 
2139 static int hci_send_frame(struct sk_buff *skb)
2140 {
2141         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2142 
2143         if (!hdev) {
2144                 kfree_skb(skb);
2145                 return -ENODEV;
2146         }
2147 
2148         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2149 
2150         /* Time stamp */
2151         __net_timestamp(skb);
2152 
2153         /* Send copy to monitor */
2154         hci_send_to_monitor(hdev, skb);
2155 
2156         if (atomic_read(&hdev->promisc)) {
2157                 /* Send copy to the sockets */
2158                 hci_send_to_sock(hdev, skb);
2159         }
2160 
2161         /* Get rid of skb owner, prior to sending to the driver. */
2162         skb_orphan(skb);
2163 
2164         return hdev->send(skb);
2165 }
2166 
2167 /* Send HCI command */
2168 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2169 {
2170         int len = HCI_COMMAND_HDR_SIZE + plen;
2171         struct hci_command_hdr *hdr;
2172         struct sk_buff *skb;
2173 
2174         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2175 
2176         skb = bt_skb_alloc(len, GFP_ATOMIC);
2177         if (!skb) {
2178                 BT_ERR("%s no memory for command", hdev->name);
2179                 return -ENOMEM;
2180         }
2181 
2182         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2183         hdr->opcode = cpu_to_le16(opcode);
2184         hdr->plen   = plen;
2185 
2186         if (plen)
2187                 memcpy(skb_put(skb, plen), param, plen);
2188 
2189         BT_DBG("skb len %d", skb->len);
2190 
2191         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2192         skb->dev = (void *) hdev;
2193 
2194         if (test_bit(HCI_INIT, &hdev->flags))
2195                 hdev->init_last_cmd = opcode;
2196 
2197         skb_queue_tail(&hdev->cmd_q, skb);
2198         queue_work(hdev->workqueue, &hdev->cmd_work);
2199 
2200         return 0;
2201 }
2202 
2203 /* Get data from the previously sent command */
2204 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2205 {
2206         struct hci_command_hdr *hdr;
2207 
2208         if (!hdev->sent_cmd)
2209                 return NULL;
2210 
2211         hdr = (void *) hdev->sent_cmd->data;
2212 
2213         if (hdr->opcode != cpu_to_le16(opcode))
2214                 return NULL;
2215 
2216         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2217 
2218         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2219 }
2220 
2221 /* Send ACL data */
2222 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2223 {
2224         struct hci_acl_hdr *hdr;
2225         int len = skb->len;
2226 
2227         skb_push(skb, HCI_ACL_HDR_SIZE);
2228         skb_reset_transport_header(skb);
2229         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2230         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2231         hdr->dlen   = cpu_to_le16(len);
2232 }
2233 
2234 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2235                           struct sk_buff *skb, __u16 flags)
2236 {
2237         struct hci_conn *conn = chan->conn;
2238         struct hci_dev *hdev = conn->hdev;
2239         struct sk_buff *list;
2240 
2241         skb->len = skb_headlen(skb);
2242         skb->data_len = 0;
2243 
2244         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2245 
2246         switch (hdev->dev_type) {
2247         case HCI_BREDR:
2248                 hci_add_acl_hdr(skb, conn->handle, flags);
2249                 break;
2250         case HCI_AMP:
2251                 hci_add_acl_hdr(skb, chan->handle, flags);
2252                 break;
2253         default:
2254                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2255                 return;
2256         }
2257 
2258         list = skb_shinfo(skb)->frag_list;
2259         if (!list) {
2260                 /* Non fragmented */
2261                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2262 
2263                 skb_queue_tail(queue, skb);
2264         } else {
2265                 /* Fragmented */
2266                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2267 
2268                 skb_shinfo(skb)->frag_list = NULL;
2269 
2270                 /* Queue all fragments atomically */
2271                 spin_lock(&queue->lock);
2272 
2273                 __skb_queue_tail(queue, skb);
2274 
2275                 flags &= ~ACL_START;
2276                 flags |= ACL_CONT;
2277                 do {
2278                         skb = list; list = list->next;
2279 
2280                         skb->dev = (void *) hdev;
2281                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2282                         hci_add_acl_hdr(skb, conn->handle, flags);
2283 
2284                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2285 
2286                         __skb_queue_tail(queue, skb);
2287                 } while (list);
2288 
2289                 spin_unlock(&queue->lock);
2290         }
2291 }
2292 
2293 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2294 {
2295         struct hci_dev *hdev = chan->conn->hdev;
2296 
2297         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2298 
2299         skb->dev = (void *) hdev;
2300 
2301         hci_queue_acl(chan, &chan->data_q, skb, flags);
2302 
2303         queue_work(hdev->workqueue, &hdev->tx_work);
2304 }
2305 
2306 /* Send SCO data */
2307 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2308 {
2309         struct hci_dev *hdev = conn->hdev;
2310         struct hci_sco_hdr hdr;
2311 
2312         BT_DBG("%s len %d", hdev->name, skb->len);
2313 
2314         hdr.handle = cpu_to_le16(conn->handle);
2315         hdr.dlen   = skb->len;
2316 
2317         skb_push(skb, HCI_SCO_HDR_SIZE);
2318         skb_reset_transport_header(skb);
2319         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2320 
2321         skb->dev = (void *) hdev;
2322         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2323 
2324         skb_queue_tail(&conn->data_q, skb);
2325         queue_work(hdev->workqueue, &hdev->tx_work);
2326 }
2327 
2328 /* ---- HCI TX task (outgoing data) ---- */
2329 
/* HCI Connection scheduler */
/* Pick the connection of the given link type with the fewest packets in
 * flight (fairness) and compute in *quote how many packets it may send
 * this round. Returns NULL (and *quote = 0) when nothing is sendable. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                /* Skip other link types and idle queues */
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                /* Track the least busy connection */
                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }

                /* Stop early once every connection of this type was seen */
                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                /* Controller buffer budget for this link type */
                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                /* Share the budget evenly, but always allow at least one */
                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
2390 
2391 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2392 {
2393         struct hci_conn_hash *h = &hdev->conn_hash;
2394         struct hci_conn *c;
2395 
2396         BT_ERR("%s link tx timeout", hdev->name);
2397 
2398         rcu_read_lock();
2399 
2400         /* Kill stalled connections */
2401         list_for_each_entry_rcu(c, &h->list, list) {
2402                 if (c->type == type && c->sent) {
2403                         BT_ERR("%s killing stalled connection %pMR",
2404                                hdev->name, &c->dst);
2405                         hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2406                 }
2407         }
2408 
2409         rcu_read_unlock();
2410 }
2411 
/* Pick the channel whose head-of-queue packet has the highest priority,
 * breaking ties in favor of the connection with the fewest packets in
 * flight, and compute in *quote how many packets it may send this round.
 * Returns NULL when no channel of this link type has pending data. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        /* Priority of this channel's next packet */
                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        /* Higher priority found: restart the fairness
                         * bookkeeping at the new priority level */
                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        /* Among equal priorities, prefer the least busy
                         * connection */
                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                /* Stop early once every connection of this type was seen */
                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        /* Controller buffer budget for this link type */
        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        /* Share the budget evenly, but always allow at least one */
        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}
2493 
/* Anti-starvation pass: after a scheduling round, bump the priority of
 * channels that have pending data but sent nothing, so they win a slot
 * in the next round. Channels that did send just get their per-round
 * counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        /* Channel got to send: just reset its counter */
                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        /* Promote the starved channel's head packet */
                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                /* Stop early once every connection of this type was seen */
                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

}
2543 
/* Number of controller data blocks consumed by this ACL packet; the ACL
 * header itself is not counted toward the payload. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2549 
2550 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2551 {
2552         if (!test_bit(HCI_RAW, &hdev->flags)) {
2553                 /* ACL tx timeout must be longer than maximum
2554                  * link supervision timeout (40.9 seconds) */
2555                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2556                                        HCI_ACL_TX_TIMEOUT))
2557                         hci_link_tx_to(hdev, ACL_LINK);
2558         }
2559 }
2560 
/* Send queued ACL data under packet-based flow control */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        /* Disconnect stalled links if the controller stopped acking */
        __check_timeout(hdev, cnt);

        /* Keep draining while credits remain and channels have data */
        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        /* If anything was sent, promote starved channels for next round */
        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}
2598 
/* Send queued ACL data under block-based flow control */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        /* Disconnect stalled links if the controller stopped acking */
        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        /* AMP controllers schedule AMP links, BR/EDR ones ACL links */
        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        /* Never send a packet exceeding the remaining
                         * block budget */
                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        /* If anything was sent, promote starved channels for next round */
        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}
2652 
2653 static void hci_sched_acl(struct hci_dev *hdev)
2654 {
2655         BT_DBG("%s", hdev->name);
2656 
2657         /* No ACL link over BR/EDR controller */
2658         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2659                 return;
2660 
2661         /* No AMP link over AMP controller */
2662         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2663                 return;
2664 
2665         switch (hdev->flow_ctl_mode) {
2666         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2667                 hci_sched_acl_pkt(hdev);
2668                 break;
2669 
2670         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2671                 hci_sched_acl_blk(hdev);
2672                 break;
2673         }
2674 }
2675 
2676 /* Schedule SCO */
2677 static void hci_sched_sco(struct hci_dev *hdev)
2678 {
2679         struct hci_conn *conn;
2680         struct sk_buff *skb;
2681         int quote;
2682 
2683         BT_DBG("%s", hdev->name);
2684 
2685         if (!hci_conn_num(hdev, SCO_LINK))
2686                 return;
2687 
2688         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2689                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2690                         BT_DBG("skb %p len %d", skb, skb->len);
2691                         hci_send_frame(skb);
2692 
2693                         conn->sent++;
2694                         if (conn->sent == ~0)
2695                                 conn->sent = 0;
2696                 }
2697         }
2698 }
2699 
2700 static void hci_sched_esco(struct hci_dev *hdev)
2701 {
2702         struct hci_conn *conn;
2703         struct sk_buff *skb;
2704         int quote;
2705 
2706         BT_DBG("%s", hdev->name);
2707 
2708         if (!hci_conn_num(hdev, ESCO_LINK))
2709                 return;
2710 
2711         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2712                                                      &quote))) {
2713                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2714                         BT_DBG("skb %p len %d", skb, skb->len);
2715                         hci_send_frame(skb);
2716 
2717                         conn->sent++;
2718                         if (conn->sent == ~0)
2719                                 conn->sent = 0;
2720                 }
2721         }
2722 }
2723 
/* Send queued LE data */
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        /* Controllers without a dedicated LE buffer pool share the ACL
         * credits */
        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        /* Write the remaining budget back to whichever pool it came from */
        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        /* If anything was sent, promote starved channels for next round */
        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
2774 
2775 static void hci_tx_work(struct work_struct *work)
2776 {
2777         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2778         struct sk_buff *skb;
2779 
2780         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2781                hdev->sco_cnt, hdev->le_cnt);
2782 
2783         /* Schedule queues and send stuff to HCI driver */
2784 
2785         hci_sched_acl(hdev);
2786 
2787         hci_sched_sco(hdev);
2788 
2789         hci_sched_esco(hdev);
2790 
2791         hci_sched_le(hdev);
2792 
2793         /* Send next queued raw (unknown type) packet */
2794         while ((skb = skb_dequeue(&hdev->raw_q)))
2795                 hci_send_frame(skb);
2796 }
2797 
2798 /* ----- HCI RX task (incoming data processing) ----- */
2799 
2800 /* ACL data packet */
2801 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2802 {
2803         struct hci_acl_hdr *hdr = (void *) skb->data;
2804         struct hci_conn *conn;
2805         __u16 handle, flags;
2806 
2807         skb_pull(skb, HCI_ACL_HDR_SIZE);
2808 
2809         handle = __le16_to_cpu(hdr->handle);
2810         flags  = hci_flags(handle);
2811         handle = hci_handle(handle);
2812 
2813         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2814                handle, flags);
2815 
2816         hdev->stat.acl_rx++;
2817 
2818         hci_dev_lock(hdev);
2819         conn = hci_conn_hash_lookup_handle(hdev, handle);
2820         hci_dev_unlock(hdev);
2821 
2822         if (conn) {
2823                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2824 
2825                 /* Send to upper protocol */
2826                 l2cap_recv_acldata(conn, skb, flags);
2827                 return;
2828         } else {
2829                 BT_ERR("%s ACL packet for unknown connection handle %d",
2830                        hdev->name, handle);
2831         }
2832 
2833         kfree_skb(skb);
2834 }
2835 
2836 /* SCO data packet */
2837 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2838 {
2839         struct hci_sco_hdr *hdr = (void *) skb->data;
2840         struct hci_conn *conn;
2841         __u16 handle;
2842 
2843         skb_pull(skb, HCI_SCO_HDR_SIZE);
2844 
2845         handle = __le16_to_cpu(hdr->handle);
2846 
2847         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2848 
2849         hdev->stat.sco_rx++;
2850 
2851         hci_dev_lock(hdev);
2852         conn = hci_conn_hash_lookup_handle(hdev, handle);
2853         hci_dev_unlock(hdev);
2854 
2855         if (conn) {
2856                 /* Send to upper protocol */
2857                 sco_recv_scodata(conn, skb);
2858                 return;
2859         } else {
2860                 BT_ERR("%s SCO packet for unknown connection handle %d",
2861                        hdev->name, handle);
2862         }
2863 
2864         kfree_skb(skb);
2865 }
2866 
/* RX worker: drain hdev->rx_q and dispatch each packet by type */
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                /* In raw mode the stack does not process packets itself */
                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        /* Unknown packet type: drop it */
                        kfree_skb(skb);
                        break;
                }
        }
}
2921 
/* Command work: send the next queued HCI command if the controller has
 * credit (cmd_cnt > 0). Runs from hdev->workqueue.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command; a copy of the new one
		 * replaces it below (kfree_skb(NULL) is a no-op).
		 */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command parameters are still
		 * available when its completion event arrives.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one command credit before transmitting */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset the command timeout is disabled;
			 * otherwise (re)arm it for this command.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed (OOM): put the command back at the
			 * head of the queue and reschedule the work item.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2953 
2954 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2955 {
2956         /* General inquiry access code (GIAC) */
2957         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2958         struct hci_cp_inquiry cp;
2959 
2960         BT_DBG("%s", hdev->name);
2961 
2962         if (test_bit(HCI_INQUIRY, &hdev->flags))
2963                 return -EINPROGRESS;
2964 
2965         inquiry_cache_flush(hdev);
2966 
2967         memset(&cp, 0, sizeof(cp));
2968         memcpy(&cp.lap, lap, sizeof(cp.lap));
2969         cp.length  = length;
2970 
2971         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2972 }
2973 
2974 int hci_cancel_inquiry(struct hci_dev *hdev)
2975 {
2976         BT_DBG("%s", hdev->name);
2977 
2978         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2979                 return -EALREADY;
2980 
2981         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2982 }
2983 
2984 u8 bdaddr_to_le(u8 bdaddr_type)
2985 {
2986         switch (bdaddr_type) {
2987         case BDADDR_LE_PUBLIC:
2988                 return ADDR_LE_DEV_PUBLIC;
2989 
2990         default:
2991                 /* Fallback to LE Random address type */
2992                 return ADDR_LE_DEV_RANDOM;
2993         }
2994 }
2995 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp