Linux/net/bluetooth/hci_request.c

/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
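
/* Editor's note: an illustrative sketch, not part of the original file.
 * A typical caller builds a request on the stack, queues one or more
 * commands and runs it asynchronously. The function names here are
 * hypothetical; HCI_OP_WRITE_SCAN_ENABLE and SCAN_PAGE come from hci.h.
 */
#if 0
static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s example request done, status %u", hdev->name, status);
}

static int example_async_request(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        /* req_run() splices the queued commands onto hdev->cmd_q and
         * schedules cmd_work; it returns -ENODATA if nothing was queued.
         */
        return hci_req_run(&req, example_complete);
}
#endif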

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
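
/* Editor's note: an illustrative sketch, not part of the original file.
 * __hci_cmd_sync() blocks until the matching Command Complete event
 * arrives and returns its parameters as an skb that the caller must
 * free. Callers are expected to serialize via hci_req_sync_lock().
 */
#if 0
static int example_read_local_name(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data now points at struct hci_rp_read_local_name */
        kfree_skb(skb);
        return 0;
}
#endif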

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
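
/* Editor's note: an illustrative sketch, not part of the original file.
 * hci_req_sync() takes a request-builder callback with the signature
 * shown below; update_scan()/scan_update_work() later in this file
 * follow the same pattern. The function names here are hypothetical.
 */
#if 0
static int example_builder(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void example_sync(struct hci_dev *hdev)
{
        u8 hci_status;

        if (hci_req_sync(hdev, example_builder, 0, HCI_CMD_TIMEOUT,
                         &hci_status))
                bt_dev_err(hdev, "request failed (hci status 0x%2.2x)",
                           hci_status);
}
#endif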

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}
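
/* Worked example (editor's note, hypothetical values): two registered
 * 16-bit UUIDs 0x110a and 0x110b are emitted as a single EIR field:
 *
 *   05 03 0a 11 0b 11
 *
 * where 0x05 is the field length (one type byte plus four UUID bytes),
 * 0x03 is EIR_UUID16_ALL and the UUIDs follow in little-endian order.
 * This matches the code above: uuids_start[0] starts at 1 (the type
 * byte) and grows by sizeof(u16) per UUID.
 */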

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no-longer-valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using a random resolvable address and
         * LE privacy is enabled, controllers with Extended Scanner
         * Filter Policies support can also handle directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}
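
/* Worked example (editor's note, hypothetical values): with dev_name
 * "my-very-long-device-name" (24 bytes) and no short_name configured,
 * the complete name does not fit, so the first
 * HCI_MAX_SHORT_NAME_LENGTH bytes are copied, NUL-terminated and
 * emitted as an EIR_NAME_SHORT field by the fallback branch above.
 */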

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}
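
/* Worked example (editor's note, hypothetical values): an instance
 * added with timeout=30s and duration=10s is scheduled above for
 * min(duration, remaining_time). The first pass advertises for 10s
 * and leaves remaining_time=20s; after the third pass remaining_time
 * reaches 0, and adv_timeout_expire() then lets
 * hci_req_clear_adv_instance() remove the instance even with
 * force == false.
 */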

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from six random bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address, use the static address as the random address (but
         * skip the HCI command if the current random address is
         * already the static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
1477         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1478             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1479             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1480              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1481                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1482                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1483                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1484                                     &hdev->static_addr);
1485                 return 0;
1486         }
1487 
1488         /* Neither privacy nor a static address is being used, so use
1489          * the public address.
1490          */
1491         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1492 
1493         return 0;
1494 }
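
/* A minimal illustrative helper, not part of this file: as the loop in
 * hci_update_random_address() shows, a non-resolvable private address is
 * six random bytes with the two most significant bits cleared. bdaddr_t
 * stores the address little-endian, so b[5] holds the most significant
 * byte and the check reduces to its top two bits.
 */
static inline bool addr_is_nrpa_sketch(const bdaddr_t *addr)
{
        return (addr->b[5] & 0xc0) == 0x00;
}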
1495 
1496 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1497 {
1498         struct bdaddr_list *b;
1499 
1500         list_for_each_entry(b, &hdev->whitelist, list) {
1501                 struct hci_conn *conn;
1502 
1503                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1504                 if (!conn)
1505                         return true;
1506 
1507                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1508                         return true;
1509         }
1510 
1511         return false;
1512 }
1513 
1514 void __hci_req_update_scan(struct hci_request *req)
1515 {
1516         struct hci_dev *hdev = req->hdev;
1517         u8 scan;
1518 
1519         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1520                 return;
1521 
1522         if (!hdev_is_powered(hdev))
1523                 return;
1524 
1525         if (mgmt_powering_down(hdev))
1526                 return;
1527 
1528         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1529             disconnected_whitelist_entries(hdev))
1530                 scan = SCAN_PAGE;
1531         else
1532                 scan = SCAN_DISABLED;
1533 
1534         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1535                 scan |= SCAN_INQUIRY;
1536 
1537         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1538             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1539                 return;
1540 
1541         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1542 }
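
/* An illustrative sketch, not part of this file: the scan byte that
 * __hci_req_update_scan() writes with HCI_OP_WRITE_SCAN_ENABLE is a
 * bitmask (SCAN_DISABLED 0x00, SCAN_INQUIRY 0x01, SCAN_PAGE 0x02 in
 * hci.h), so connectable and discoverable combine independently.
 */
static u8 scan_mode_sketch(bool connectable, bool discoverable)
{
        u8 scan = connectable ? SCAN_PAGE : SCAN_DISABLED;

        if (discoverable)
                scan |= SCAN_INQUIRY;

        return scan;
}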
1543 
1544 static int update_scan(struct hci_request *req, unsigned long opt)
1545 {
1546         hci_dev_lock(req->hdev);
1547         __hci_req_update_scan(req);
1548         hci_dev_unlock(req->hdev);
1549         return 0;
1550 }
1551 
1552 static void scan_update_work(struct work_struct *work)
1553 {
1554         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1555 
1556         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1557 }
1558 
1559 static int connectable_update(struct hci_request *req, unsigned long opt)
1560 {
1561         struct hci_dev *hdev = req->hdev;
1562 
1563         hci_dev_lock(hdev);
1564 
1565         __hci_req_update_scan(req);
1566 
1567         /* If BR/EDR is not enabled and we disable advertising as a
1568          * by-product of disabling connectable, we need to update the
1569          * advertising flags.
1570          */
1571         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1572                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1573 
1574         /* Update the advertising parameters if necessary */
1575         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1576             !list_empty(&hdev->adv_instances))
1577                 __hci_req_enable_advertising(req);
1578 
1579         __hci_update_background_scan(req);
1580 
1581         hci_dev_unlock(hdev);
1582 
1583         return 0;
1584 }
1585 
1586 static void connectable_update_work(struct work_struct *work)
1587 {
1588         struct hci_dev *hdev = container_of(work, struct hci_dev,
1589                                             connectable_update);
1590         u8 status;
1591 
1592         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1593         mgmt_set_connectable_complete(hdev, status);
1594 }
1595 
1596 static u8 get_service_classes(struct hci_dev *hdev)
1597 {
1598         struct bt_uuid *uuid;
1599         u8 val = 0;
1600 
1601         list_for_each_entry(uuid, &hdev->uuids, list)
1602                 val |= uuid->svc_hint;
1603 
1604         return val;
1605 }
1606 
1607 void __hci_req_update_class(struct hci_request *req)
1608 {
1609         struct hci_dev *hdev = req->hdev;
1610         u8 cod[3];
1611 
1612         BT_DBG("%s", hdev->name);
1613 
1614         if (!hdev_is_powered(hdev))
1615                 return;
1616 
1617         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1618                 return;
1619 
1620         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1621                 return;
1622 
1623         cod[0] = hdev->minor_class;
1624         cod[1] = hdev->major_class;
1625         cod[2] = get_service_classes(hdev);
1626 
1627         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1628                 cod[1] |= 0x20;
1629 
1630         if (memcmp(cod, hdev->dev_class, 3) == 0)
1631                 return;
1632 
1633         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1634 }
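
/* An illustrative sketch, not part of this file: the 0x20 OR-ed into
 * cod[1] by __hci_req_update_class() corresponds to bit 13 of the
 * 24-bit Class of Device, defined by the Bluetooth specification as
 * the "Limited Discoverable Mode" service class bit.
 */
static inline bool cod_is_limited_sketch(const u8 cod[3])
{
        return (cod[1] & 0x20) != 0;
}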
1635 
1636 static void write_iac(struct hci_request *req)
1637 {
1638         struct hci_dev *hdev = req->hdev;
1639         struct hci_cp_write_current_iac_lap cp;
1640 
1641         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1642                 return;
1643 
1644         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1645                 /* Limited discoverable mode */
1646                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1647                 cp.iac_lap[0] = 0x00;   /* LIAC */
1648                 cp.iac_lap[1] = 0x8b;
1649                 cp.iac_lap[2] = 0x9e;
1650                 cp.iac_lap[3] = 0x33;   /* GIAC */
1651                 cp.iac_lap[4] = 0x8b;
1652                 cp.iac_lap[5] = 0x9e;
1653         } else {
1654                 /* General discoverable mode */
1655                 cp.num_iac = 1;
1656                 cp.iac_lap[0] = 0x33;   /* GIAC */
1657                 cp.iac_lap[1] = 0x8b;
1658                 cp.iac_lap[2] = 0x9e;
1659         }
1660 
1661         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1662                     (cp.num_iac * 3) + 1, &cp);
1663 }
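
/* An illustrative helper, not part of this file: the byte triplets in
 * write_iac() are 24-bit inquiry access code LAPs in little-endian
 * order, i.e. 0x33, 0x8b, 0x9e encodes the GIAC 0x9e8b33 and
 * 0x00, 0x8b, 0x9e encodes the LIAC 0x9e8b00.
 */
static void put_iac_lap_sketch(u8 lap[3], u32 iac)
{
        lap[0] = iac & 0xff;            /* 0x33 for the GIAC */
        lap[1] = (iac >> 8) & 0xff;     /* 0x8b */
        lap[2] = (iac >> 16) & 0xff;    /* 0x9e */
}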
1664 
1665 static int discoverable_update(struct hci_request *req, unsigned long opt)
1666 {
1667         struct hci_dev *hdev = req->hdev;
1668 
1669         hci_dev_lock(hdev);
1670 
1671         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1672                 write_iac(req);
1673                 __hci_req_update_scan(req);
1674                 __hci_req_update_class(req);
1675         }
1676 
1677         /* Advertising instances don't use the global discoverable setting, so
1678          * only update AD if advertising was enabled using Set Advertising.
1679          */
1680         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1681                 __hci_req_update_adv_data(req, 0x00);
1682 
1683                 /* Discoverable mode affects the local advertising
1684                  * address in limited privacy mode.
1685                  */
1686                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1687                         __hci_req_enable_advertising(req);
1688         }
1689 
1690         hci_dev_unlock(hdev);
1691 
1692         return 0;
1693 }
1694 
1695 static void discoverable_update_work(struct work_struct *work)
1696 {
1697         struct hci_dev *hdev = container_of(work, struct hci_dev,
1698                                             discoverable_update);
1699         u8 status;
1700 
1701         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1702         mgmt_set_discoverable_complete(hdev, status);
1703 }
1704 
1705 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1706                       u8 reason)
1707 {
1708         switch (conn->state) {
1709         case BT_CONNECTED:
1710         case BT_CONFIG:
1711                 if (conn->type == AMP_LINK) {
1712                         struct hci_cp_disconn_phy_link cp;
1713 
1714                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1715                         cp.reason = reason;
1716                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1717                                     &cp);
1718                 } else {
1719                         struct hci_cp_disconnect dc;
1720 
1721                         dc.handle = cpu_to_le16(conn->handle);
1722                         dc.reason = reason;
1723                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1724                 }
1725 
1726                 conn->state = BT_DISCONN;
1727 
1728                 break;
1729         case BT_CONNECT:
1730                 if (conn->type == LE_LINK) {
1731                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1732                                 break;
1733                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1734                                     0, NULL);
1735                 } else if (conn->type == ACL_LINK) {
1736                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1737                                 break;
1738                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1739                                     6, &conn->dst);
1740                 }
1741                 break;
1742         case BT_CONNECT2:
1743                 if (conn->type == ACL_LINK) {
1744                         struct hci_cp_reject_conn_req rej;
1745 
1746                         bacpy(&rej.bdaddr, &conn->dst);
1747                         rej.reason = reason;
1748 
1749                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1750                                     sizeof(rej), &rej);
1751                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1752                         struct hci_cp_reject_sync_conn_req rej;
1753 
1754                         bacpy(&rej.bdaddr, &conn->dst);
1755 
1756                         /* SCO rejection has its own limited set of
1757                          * allowed error values (0x0D-0x0F), which isn't
1758                          * compatible with most values passed to this
1759                          * function. To be safe, hard-code one of the
1760                          * values that's suitable for SCO.
1761                          */
1762                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1763 
1764                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1765                                     sizeof(rej), &rej);
1766                 }
1767                 break;
1768         default:
1769                 conn->state = BT_CLOSED;
1770                 break;
1771         }
1772 }
1773 
1774 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1775 {
1776         if (status)
1777                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1778 }
1779 
1780 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1781 {
1782         struct hci_request req;
1783         int err;
1784 
1785         hci_req_init(&req, conn->hdev);
1786 
1787         __hci_abort_conn(&req, conn, reason);
1788 
1789         err = hci_req_run(&req, abort_conn_complete);
1790         if (err && err != -ENODATA) {
1791                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
1792                 return err;
1793         }
1794 
1795         return 0;
1796 }
1797 
1798 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1799 {
1800         hci_dev_lock(req->hdev);
1801         __hci_update_background_scan(req);
1802         hci_dev_unlock(req->hdev);
1803         return 0;
1804 }
1805 
1806 static void bg_scan_update(struct work_struct *work)
1807 {
1808         struct hci_dev *hdev = container_of(work, struct hci_dev,
1809                                             bg_scan_update);
1810         struct hci_conn *conn;
1811         u8 status;
1812         int err;
1813 
1814         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1815         if (!err)
1816                 return;
1817 
1818         hci_dev_lock(hdev);
1819 
1820         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1821         if (conn)
1822                 hci_le_conn_failed(conn, status);
1823 
1824         hci_dev_unlock(hdev);
1825 }
1826 
1827 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1828 {
1829         hci_req_add_le_scan_disable(req);
1830         return 0;
1831 }
1832 
1833 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1834 {
1835         u8 length = opt;
1836         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1837         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1838         struct hci_cp_inquiry cp;
1839 
1840         BT_DBG("%s", req->hdev->name);
1841 
1842         hci_dev_lock(req->hdev);
1843         hci_inquiry_cache_flush(req->hdev);
1844         hci_dev_unlock(req->hdev);
1845 
1846         memset(&cp, 0, sizeof(cp));
1847 
1848         if (req->hdev->discovery.limited)
1849                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1850         else
1851                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1852 
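        /* The HCI Inquiry_Length parameter is expressed in units of
         * 1.28 seconds.
         */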
1853         cp.length = length;
1854 
1855         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1856 
1857         return 0;
1858 }
1859 
1860 static void le_scan_disable_work(struct work_struct *work)
1861 {
1862         struct hci_dev *hdev = container_of(work, struct hci_dev,
1863                                             le_scan_disable.work);
1864         u8 status;
1865 
1866         BT_DBG("%s", hdev->name);
1867 
1868         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1869                 return;
1870 
1871         cancel_delayed_work(&hdev->le_scan_restart);
1872 
1873         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1874         if (status) {
1875                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
1876                            status);
1877                 return;
1878         }
1879 
1880         hdev->discovery.scan_start = 0;
1881 
1882         /* If we were running an LE-only scan, change the discovery
1883          * state. If we were running both LE and BR/EDR inquiry
1884          * simultaneously, and BR/EDR inquiry is already finished, stop
1885          * discovery; otherwise BR/EDR inquiry will stop discovery when
1886          * it finishes. If we are resolving a remote device name, do not
1887          * change the discovery state.
1888          */
1889 
1890         if (hdev->discovery.type == DISCOV_TYPE_LE)
1891                 goto discov_stopped;
1892 
1893         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1894                 return;
1895 
1896         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1897                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1898                     hdev->discovery.state != DISCOVERY_RESOLVING)
1899                         goto discov_stopped;
1900 
1901                 return;
1902         }
1903 
1904         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1905                      HCI_CMD_TIMEOUT, &status);
1906         if (status) {
1907                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
1908                 goto discov_stopped;
1909         }
1910 
1911         return;
1912 
1913 discov_stopped:
1914         hci_dev_lock(hdev);
1915         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1916         hci_dev_unlock(hdev);
1917 }
1918 
1919 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1920 {
1921         struct hci_dev *hdev = req->hdev;
1922         struct hci_cp_le_set_scan_enable cp;
1923 
1924         /* If the controller is not scanning, we are done. */
1925         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1926                 return 0;
1927 
1928         hci_req_add_le_scan_disable(req);
1929 
1930         memset(&cp, 0, sizeof(cp));
1931         cp.enable = LE_SCAN_ENABLE;
1932         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1933         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1934 
1935         return 0;
1936 }
1937 
1938 static void le_scan_restart_work(struct work_struct *work)
1939 {
1940         struct hci_dev *hdev = container_of(work, struct hci_dev,
1941                                             le_scan_restart.work);
1942         unsigned long timeout, duration, scan_start, now;
1943         u8 status;
1944 
1945         BT_DBG("%s", hdev->name);
1946 
1947         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1948         if (status) {
1949                 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
1950                            status);
1951                 return;
1952         }
1953 
1954         hci_dev_lock(hdev);
1955 
1956         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1957             !hdev->discovery.scan_start)
1958                 goto unlock;
1959 
1960         /* When the scan was started, hdev->le_scan_disable was queued to
1961          * run 'duration' after scan_start. During the scan restart this
1962          * work was canceled, so we must queue it again with the proper
1963          * remaining timeout to make sure the scan does not run indefinitely.
1964          */
1965         duration = hdev->discovery.scan_duration;
1966         scan_start = hdev->discovery.scan_start;
1967         now = jiffies;
1968         if (now - scan_start <= duration) {
1969                 int elapsed;
1970 
1971                 if (now >= scan_start)
1972                         elapsed = now - scan_start;
1973                 else
1974                         elapsed = ULONG_MAX - scan_start + now;
1975 
1976                 timeout = duration - elapsed;
1977         } else {
1978                 timeout = 0;
1979         }
1980 
1981         queue_delayed_work(hdev->req_workqueue,
1982                            &hdev->le_scan_disable, timeout);
1983 
1984 unlock:
1985         hci_dev_unlock(hdev);
1986 }
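
/* A minimal illustrative alternative, not part of this file: the same
 * remaining-time computation as in le_scan_restart_work(), expressed
 * with the kernel's wrap-safe jiffies comparison helper instead of the
 * open-coded arithmetic.
 */
static unsigned long remaining_scan_time_sketch(unsigned long scan_start,
                                                unsigned long duration)
{
        unsigned long now = jiffies;

        if (time_after(now, scan_start + duration))
                return 0;

        /* Unsigned subtraction of jiffies values is wrap-safe for
         * deltas smaller than ULONG_MAX / 2.
         */
        return duration - (now - scan_start);
}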
1987 
1988 static void disable_advertising(struct hci_request *req)
1989 {
1990         u8 enable = 0x00;
1991 
1992         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1993 }
1994 
1995 static int active_scan(struct hci_request *req, unsigned long opt)
1996 {
1997         u16 interval = opt;
1998         struct hci_dev *hdev = req->hdev;
1999         struct hci_cp_le_set_scan_param param_cp;
2000         struct hci_cp_le_set_scan_enable enable_cp;
2001         u8 own_addr_type;
2002         int err;
2003 
2004         BT_DBG("%s", hdev->name);
2005 
2006         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2007                 hci_dev_lock(hdev);
2008 
2009                 /* Don't let discovery abort an outgoing connection attempt
2010                  * that's using directed advertising.
2011                  */
2012                 if (hci_lookup_le_connect(hdev)) {
2013                         hci_dev_unlock(hdev);
2014                         return -EBUSY;
2015                 }
2016 
2017                 cancel_adv_timeout(hdev);
2018                 hci_dev_unlock(hdev);
2019 
2020                 disable_advertising(req);
2021         }
2022 
2023         /* If the controller is scanning, the background scanning is
2024          * running. Thus, we should temporarily stop it in order to set
2025          * the discovery scanning parameters.
2026          */
2027         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2028                 hci_req_add_le_scan_disable(req);
2029 
2030         /* All active scans will be done with either a resolvable private
2031          * address (when the privacy feature has been enabled) or a
2032          * non-resolvable private address.
2033          */
2034         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2035                                         &own_addr_type);
2036         if (err < 0)
2037                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2038 
2039         memset(&param_cp, 0, sizeof(param_cp));
2040         param_cp.type = LE_SCAN_ACTIVE;
2041         param_cp.interval = cpu_to_le16(interval);
2042         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2043         param_cp.own_address_type = own_addr_type;
2044 
2045         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2046                     &param_cp);
2047 
2048         memset(&enable_cp, 0, sizeof(enable_cp));
2049         enable_cp.enable = LE_SCAN_ENABLE;
2050         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2051 
2052         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2053                     &enable_cp);
2054 
2055         return 0;
2056 }
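
/* An illustrative helper, not part of this file: LE scan interval and
 * window values in HCI commands (param_cp.interval and param_cp.window
 * in active_scan()) are expressed in units of 0.625 ms.
 */
static inline unsigned int le_scan_units_to_ms_sketch(u16 units)
{
        return (units * 625) / 1000;
}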
2057 
2058 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2059 {
2060         int err;
2061 
2062         BT_DBG("%s", req->hdev->name);
2063 
2064         err = active_scan(req, opt);
2065         if (err)
2066                 return err;
2067 
2068         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2069 }
2070 
2071 static void start_discovery(struct hci_dev *hdev, u8 *status)
2072 {
2073         unsigned long timeout;
2074 
2075         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2076 
2077         switch (hdev->discovery.type) {
2078         case DISCOV_TYPE_BREDR:
2079                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2080                         hci_req_sync(hdev, bredr_inquiry,
2081                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2082                                      status);
2083                 return;
2084         case DISCOV_TYPE_INTERLEAVED:
2085                 /* When running simultaneous discovery, the LE scanning time
2086                  * should occupy the whole discovery time since BR/EDR
2087                  * inquiry and LE scanning are scheduled by the controller.
2088                  *
2089                  * For interleaved discovery, in comparison, BR/EDR inquiry
2090                  * and LE scanning are done sequentially with separate
2091                  * timeouts.
2092                  */
2093                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2094                              &hdev->quirks)) {
2095                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2096                         /* During simultaneous discovery, we double the LE
2097                          * scan interval. We must leave some time for the
2098                          * controller to do BR/EDR inquiry.
2099                          */
2100                         hci_req_sync(hdev, interleaved_discov,
2101                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2102                                      status);
2103                         break;
2104                 }
2105 
2106                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2107                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2108                              HCI_CMD_TIMEOUT, status);
2109                 break;
2110         case DISCOV_TYPE_LE:
2111                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2112                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2113                              HCI_CMD_TIMEOUT, status);
2114                 break;
2115         default:
2116                 *status = HCI_ERROR_UNSPECIFIED;
2117                 return;
2118         }
2119 
2120         if (*status)
2121                 return;
2122 
2123         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2124 
2125         /* When service discovery is used and the controller has a
2126          * strict duplicate filter, it is important to remember the
2127          * start and duration of the scan. This is required for
2128          * restarting scanning during the discovery phase.
2129          */
2130         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2131             hdev->discovery.result_filtering) {
2132                 hdev->discovery.scan_start = jiffies;
2133                 hdev->discovery.scan_duration = timeout;
2134         }
2135 
2136         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2137                            timeout);
2138 }
2139 
2140 bool hci_req_stop_discovery(struct hci_request *req)
2141 {
2142         struct hci_dev *hdev = req->hdev;
2143         struct discovery_state *d = &hdev->discovery;
2144         struct hci_cp_remote_name_req_cancel cp;
2145         struct inquiry_entry *e;
2146         bool ret = false;
2147 
2148         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2149 
2150         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2151                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2152                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2153 
2154                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2155                         cancel_delayed_work(&hdev->le_scan_disable);
2156                         hci_req_add_le_scan_disable(req);
2157                 }
2158 
2159                 ret = true;
2160         } else {
2161                 /* Passive scanning */
2162                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2163                         hci_req_add_le_scan_disable(req);
2164                         ret = true;
2165                 }
2166         }
2167 
2168         /* No further actions needed for LE-only discovery */
2169         if (d->type == DISCOV_TYPE_LE)
2170                 return ret;
2171 
2172         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2173                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2174                                                      NAME_PENDING);
2175                 if (!e)
2176                         return ret;
2177 
2178                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2179                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2180                             &cp);
2181                 ret = true;
2182         }
2183 
2184         return ret;
2185 }
2186 
2187 static int stop_discovery(struct hci_request *req, unsigned long opt)
2188 {
2189         hci_dev_lock(req->hdev);
2190         hci_req_stop_discovery(req);
2191         hci_dev_unlock(req->hdev);
2192 
2193         return 0;
2194 }
2195 
2196 static void discov_update(struct work_struct *work)
2197 {
2198         struct hci_dev *hdev = container_of(work, struct hci_dev,
2199                                             discov_update);
2200         u8 status = 0;
2201 
2202         switch (hdev->discovery.state) {
2203         case DISCOVERY_STARTING:
2204                 start_discovery(hdev, &status);
2205                 mgmt_start_discovery_complete(hdev, status);
2206                 if (status)
2207                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2208                 else
2209                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2210                 break;
2211         case DISCOVERY_STOPPING:
2212                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2213                 mgmt_stop_discovery_complete(hdev, status);
2214                 if (!status)
2215                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2216                 break;
2217         case DISCOVERY_STOPPED:
2218         default:
2219                 return;
2220         }
2221 }
2222 
2223 static void discov_off(struct work_struct *work)
2224 {
2225         struct hci_dev *hdev = container_of(work, struct hci_dev,
2226                                             discov_off.work);
2227 
2228         BT_DBG("%s", hdev->name);
2229 
2230         hci_dev_lock(hdev);
2231 
2232         /* When the discoverable timeout triggers, just make sure that
2233          * the limited discoverable flag is cleared. Even in the case
2234          * of a timeout triggered from general discoverable mode, it is
2235          * safe to unconditionally clear the flag.
2236          */
2237         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2238         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2239         hdev->discov_timeout = 0;
2240 
2241         hci_dev_unlock(hdev);
2242 
2243         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2244         mgmt_new_settings(hdev);
2245 }
2246 
2247 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2248 {
2249         struct hci_dev *hdev = req->hdev;
2250         u8 link_sec;
2251 
2252         hci_dev_lock(hdev);
2253 
2254         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2255             !lmp_host_ssp_capable(hdev)) {
2256                 u8 mode = 0x01;
2257 
2258                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2259 
2260                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2261                         u8 support = 0x01;
2262 
2263                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2264                                     sizeof(support), &support);
2265                 }
2266         }
2267 
2268         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2269             lmp_bredr_capable(hdev)) {
2270                 struct hci_cp_write_le_host_supported cp;
2271 
2272                 cp.le = 0x01;
2273                 cp.simul = 0x00;
2274 
2275                 /* Check first whether we already have the right
2276                  * host state (host features set).
2277                  */
2278                 if (cp.le != lmp_host_le_capable(hdev) ||
2279                     cp.simul != lmp_host_le_br_capable(hdev))
2280                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2281                                     sizeof(cp), &cp);
2282         }
2283 
2284         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2285                 /* Make sure the controller has a good default for
2286                  * advertising data. This also applies to the case
2287                  * where BR/EDR was toggled during the AUTO_OFF phase.
2288                  */
2289                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2290                     list_empty(&hdev->adv_instances)) {
2291                         __hci_req_update_adv_data(req, 0x00);
2292                         __hci_req_update_scan_rsp_data(req, 0x00);
2293 
2294                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2295                                 __hci_req_enable_advertising(req);
2296                 } else if (!list_empty(&hdev->adv_instances)) {
2297                         struct adv_info *adv_instance;
2298 
2299                         adv_instance = list_first_entry(&hdev->adv_instances,
2300                                                         struct adv_info, list);
2301                         __hci_req_schedule_adv_instance(req,
2302                                                         adv_instance->instance,
2303                                                         true);
2304                 }
2305         }
2306 
2307         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2308         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2309                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2310                             sizeof(link_sec), &link_sec);
2311 
2312         if (lmp_bredr_capable(hdev)) {
2313                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2314                         __hci_req_write_fast_connectable(req, true);
2315                 else
2316                         __hci_req_write_fast_connectable(req, false);
2317                 __hci_req_update_scan(req);
2318                 __hci_req_update_class(req);
2319                 __hci_req_update_name(req);
2320                 __hci_req_update_eir(req);
2321         }
2322 
2323         hci_dev_unlock(hdev);
2324         return 0;
2325 }
2326 
2327 int __hci_req_hci_power_on(struct hci_dev *hdev)
2328 {
2329         /* Register the available SMP channels (BR/EDR and LE) only when
2330          * successfully powering on the controller. This late
2331          * registration is required so that LE SMP can clearly decide
2332          * whether the public address or the static address is used.
2333          */
2334         smp_register(hdev);
2335 
2336         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2337                               NULL);
2338 }
2339 
2340 void hci_request_setup(struct hci_dev *hdev)
2341 {
2342         INIT_WORK(&hdev->discov_update, discov_update);
2343         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2344         INIT_WORK(&hdev->scan_update, scan_update_work);
2345         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2346         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2347         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2348         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2349         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2350         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2351 }
2352 
2353 void hci_request_cancel_all(struct hci_dev *hdev)
2354 {
2355         hci_req_sync_cancel(hdev, ENODEV);
2356 
2357         cancel_work_sync(&hdev->discov_update);
2358         cancel_work_sync(&hdev->bg_scan_update);
2359         cancel_work_sync(&hdev->scan_update);
2360         cancel_work_sync(&hdev->connectable_update);
2361         cancel_work_sync(&hdev->discoverable_update);
2362         cancel_delayed_work_sync(&hdev->discov_off);
2363         cancel_delayed_work_sync(&hdev->le_scan_disable);
2364         cancel_delayed_work_sync(&hdev->le_scan_restart);
2365 
2366         if (hdev->adv_instance_timeout) {
2367                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2368                 hdev->adv_instance_timeout = 0;
2369         }
2370 }
2371 
