TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c


/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/*
 * Bluetooth HCI Core.
 *
 * $Id: hci_core.c,v 1.6 2002/04/17 17:37:16 maxk Exp $
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(A...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;

/* HCI device list */
LIST_HEAD(hci_dev_list);
rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
        notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI hotplug support ---- */

#ifdef CONFIG_HOTPLUG

static int hci_run_hotplug(char *dev, char *action)
{
        char *argv[3], *envp[5], dstr[20], astr[32];

        /* snprintf guards the fixed-size buffers against overflow */
        snprintf(dstr, sizeof(dstr), "DEVICE=%s", dev);
        snprintf(astr, sizeof(astr), "ACTION=%s", action);

        argv[0] = hotplug_path;
        argv[1] = "bluetooth";
        argv[2] = NULL;

        envp[0] = "HOME=/";
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = dstr;
        envp[3] = astr;
        envp[4] = NULL;

        return call_usermodehelper(argv[0], argv, envp, 0);
}
#else
#define hci_run_hotplug(A...)
#endif
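
/*
 * Note: hci_run_hotplug() execs the userspace agent named by
 * hotplug_path (typically /sbin/hotplug) with the single argument
 * "bluetooth", passing the device name and action through the
 * DEVICE= and ACTION= environment variables built above.
 */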

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
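
/*
 * The synchronous request pattern: the caller queues itself on
 * req_wait_q, fires the request callback (which sends one or more
 * HCI commands), and sleeps until hci_req_complete() or
 * hci_req_cancel() flips req_status, or the timeout expires.
 * Callers normally go through hci_request() below so that requests
 * are serialized per device, e.g.:
 *
 *      err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
 */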

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        __u16 param;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Mandatory initialization */

        /* Read Local Supported Features */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = __cpu_to_le16(0xffff);
                cp.sco_max_pkt = __cpu_to_le16(0xffff);
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        {
                struct hci_cp_set_event_flt cp;
                cp.flt_type  = HCI_FLT_CLEAR_ALL;
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
        }

        /* Page timeout ~20 secs */
        param = __cpu_to_le16(0x8000);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = __cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
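
/*
 * The timeout parameters above are expressed in baseband slots of
 * 0.625 ms: 0x8000 slots ~= 20.5 s and 0x7d00 slots = 20.0 s, hence
 * the "~20 secs" comments.
 */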

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
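
/*
 * Every successful hci_dev_get() takes a reference on the device;
 * the caller is responsible for dropping it with hci_dev_put()
 * when done, as the ioctl helpers below do.
 */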

/* ---- Inquiry support ---- */
void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->info.bdaddr, bdaddr))
                        break;
        return e;
}

void inquiry_cache_update(struct hci_dev *hdev, struct inquiry_info *info)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));

        if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                memset(e, 0, sizeof(struct inquiry_entry));
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->info, info, sizeof(*info));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}
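
/*
 * The inquiry cache is a simple singly-linked list keyed by BD
 * address. inquiry_cache_update() runs from the (non-sleeping)
 * event path as inquiry results arrive, so new entries are
 * allocated with GFP_ATOMIC.
 */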

int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++)
                memcpy(info++, &e->info, sizeof(*info));

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(unsigned long arg)
{
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf, *ptr;

        ptr = (void *) arg;
        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * 2 * HZ;
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
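
/*
 * User buffer layout for the inquiry ioctl: a struct hci_inquiry_req
 * header (written back with the actual num_rsp) immediately followed
 * by num_rsp struct inquiry_info entries copied out of the cache.
 */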

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
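
/*
 * Devices flagged HCI_RAW are brought up without running
 * hci_init_req(): no commands are sent and initialization is left
 * to userspace. The same flag short-circuits frame processing in
 * hci_rx_task() below.
 */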

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);
        __hci_request(hdev, hci_reset_req, 0, HZ/4);
        clear_bit(HCI_INIT, &hdev->flags);

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
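
/*
 * Shutdown ordering matters here: the RX/TX tasklets are killed
 * before the connection hash is flushed, a final reset is issued
 * while HCI_INIT is set, and only then is the command tasklet
 * killed and the driver's close() called on empty queues.
 */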

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, unsigned long arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}
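
/*
 * For HCISETACLMTU/HCISETSCOMTU, dev_opt packs two 16-bit values:
 * as dereferenced above, the first halfword holds the packet count
 * and the second the MTU (i.e. on a little-endian host the count is
 * in the low word and the MTU in the high word).
 */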

int hci_get_dev_list(unsigned long arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 *) arg))
                return -EFAULT;

        if (!dev_num)
                return -EINVAL;

        size = dev_num * sizeof(*dr) + sizeof(*dl);

        if (verify_area(VERIFY_WRITE, (void *) arg, size))
                return -EFAULT;

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;
        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = n * sizeof(*dr) + sizeof(*dl);

        copy_to_user((void *) arg, dl, size);
        kfree(dl);

        return 0;
}

int hci_get_dev_info(unsigned long arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, (void *) arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user((void *) arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_proc_init(hdev);

        hci_notify(hdev, HCI_DEV_REG);
        hci_run_hotplug(hdev->name, "register");

        return id;
}
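
/*
 * The id scan above keeps hci_dev_list sorted by id and reuses the
 * first free index: the loop stops at the first gap in the sequence
 * and list_add() inserts the new device right after the last entry
 * that still matched.
 */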

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_dev_proc_cleanup(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        hci_notify(hdev, HCI_DEV_UNREG);
        hci_run_hotplug(hdev->name, "unregister");

        __hci_dev_put(hdev);
        return 0;
}

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        hci_run_hotplug(hdev->name, "suspend");
        return 0;
}

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        hci_run_hotplug(hdev->name, "resume");
        return 0;
}

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                do_gettimeofday(&skb->stamp);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        skb->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}
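
/*
 * Commands are never sent directly: hci_send_cmd() only queues the
 * frame on cmd_q and schedules the command tasklet, which transmits
 * it once the controller has signalled room for another command
 * (see hci_cmd_task() at the bottom of this file).
 */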

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;

        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
        hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = __cpu_to_le16(len);

        skb->h.raw = (void *) hdr;
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non-fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        skb->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
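
/*
 * Fragmented skbs arrive with the continuation fragments chained on
 * frag_list. The head is tagged ACL_START and each continuation
 * ACL_CONT, and the whole chain is queued under the data_q lock so
 * fragments of different packets cannot interleave.
 */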

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = __cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
        memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
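
/*
 * Fair(ish) scheduling: among connections of the given type that
 * have queued data, pick the one with the fewest packets in flight
 * and grant it a quote of the free controller buffers divided by
 * the number of ready connections (at least one).
 */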

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        /* ACL tx timeout must be longer than maximum
         * link supervision timeout (40.9 seconds) */
        if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
                hci_acl_tx_to(hdev);

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (skb->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (skb->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}

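/*
 * Command flow control: cmd_cnt holds the number of commands the
 * controller will currently accept. It is decremented here on each
 * transmit and replenished by the event handler when a Command
 * Complete/Status event arrives; a clone of the outgoing frame is
 * kept in sent_cmd so hci_sent_cmd_data() can recover its
 * parameters. If no credit returns within a second, the counter is
 * forced back to 1 to recover from a wedged controller.
 */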
