TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_request.c

/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
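
/* Illustrative sketch (not part of the original file): the typical usage
 * pattern for the request API above. A request lives on the stack, one or
 * more commands are queued with hci_req_add(), and hci_req_run() splices
 * them onto hdev->cmd_q in one go. The opcode chosen here is just an
 * example.
 */
static int example_run_request(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        /* Returns -ENODATA if nothing was queued, the builder error if one
         * occurred, or 0 once the command work has been scheduled.
         */
        return hci_req_run(&req, NULL);
}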

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
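
/* Illustrative sketch (not part of the original file): sending a single
 * command synchronously and consuming the returned event skb.
 * HCI_OP_READ_LOCAL_VERSION takes no parameters; HCI_INIT_TIMEOUT is the
 * timeout constant used elsewhere in the stack. Callers are expected to
 * serialize via hdev->req_lock.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data holds the Command Complete return parameters */
        kfree_skb(skb);
        return 0;
}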

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
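
/* Illustrative sketch (not part of the original file): hci_req_sync() takes
 * a builder callback that queues commands onto the request; the opt argument
 * is an opaque value passed through to the builder. The builder below is an
 * assumption made up for the example.
 */
static int example_write_scan_enable(struct hci_request *req,
                                     unsigned long opt)
{
        u8 scan = opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

/* Which would then be run as, e.g.:
 *
 *      hci_req_sync(hdev, example_write_scan_enable, SCAN_DISABLED,
 *                   HCI_CMD_TIMEOUT, NULL);
 */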

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
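
/* Note on the values above: page scan interval and window are expressed in
 * baseband slots of 0.625 ms, so 0x0100 = 256 * 0.625 ms = 160 ms,
 * 0x0800 = 2048 * 0.625 ms = 1.28 s, and the window 0x0012 = 18 * 0.625 ms
 * = 11.25 ms.
 */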

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}
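
/* Worked example (illustrative): with 16-bit UUIDs 0x110A and 0x110B in
 * hdev->uuids, the bytes emitted above would be
 *
 *      05 03 0a 11 0b 11
 *
 * i.e. length 0x05, type EIR_UUID16_ALL, then each UUID in little-endian
 * order. Running out of space downgrades the type to EIR_UUID16_SOME.
 */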

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
715                 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all white list entries that are no longer valid have
         * been removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return the filter policy value to not use the
         * white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller uses resolvable random addresses, i.e. LE
         * privacy is enabled, controllers that support the Extended
         * Scanner Filter Policies can additionally handle directed
         * advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}
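
/* Note on the scan parameters above: hdev->le_scan_interval and
 * hdev->le_scan_window are in units of 0.625 ms. With the stack defaults of
 * 0x0060 and 0x0030 this gives a 60 ms interval and a 30 ms window, i.e.
 * passive scanning at a 50% duty cycle.
 */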

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in slave role. */
        if (hdev->conn_hash.le_num_slave > 0) {
                /* Slave connection state and non connectable mode bit 20. */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Slave connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in master role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
                /* Master connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Master connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}
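
/* Note on the bit arithmetic above: the LE supported states are a 64-bit
 * little-endian bitmap, so state bit N is tested as
 * le_states[N / 8] & (1 << (N % 8)); e.g. bit 38 lives in byte 4 under
 * mask 0x40.
 */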

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}
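
/* Worked example (illustrative): an instance with duration 2 s and
 * remaining_time 5 s is scheduled with timeout = 2 and its remaining_time
 * drops to 3 s; once remaining_time falls below the duration, the shorter
 * remaining lifetime is used as the timeout instead.
 */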

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
1486                         /* The non-resolvable private address is six random
1487                          * bytes with the two most significant bits cleared;
1488                          * b[5] is the MSB since bdaddr_t is little-endian.
1489                          */
1490                         get_random_bytes(&nrpa, 6);
1491                         nrpa.b[5] &= 0x3f;
1492 
1493                         /* The non-resolvable private address shall not be
1494                          * equal to the public address.
1495                          */
1496                         if (bacmp(&hdev->bdaddr, &nrpa))
1497                                 break;
1498                 }
1499 
1500                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1501                 set_random_addr(req, &nrpa);
1502                 return 0;
1503         }
1504 
1505         /* If forcing static address is in use or there is no public
1506          * address, use the static address as the random address (but
1507          * skip the HCI command if the current random address is already
1508          * the static one).
1509          *
1510          * In case BR/EDR has been disabled on a dual-mode controller
1511          * and a static address has been configured, then use that
1512          * address instead of the public BR/EDR address.
1513          */
1514         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1515             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1516             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1517              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1518                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1519                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1520                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1521                                     &hdev->static_addr);
1522                 return 0;
1523         }
1524 
1525         /* Neither privacy nor a static address is being used, so use
1526          * the public address.
1527          */
1528         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1529 
1530         return 0;
1531 }
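
/* A sketch of the typical calling pattern, modeled on the advertising
 * code in this file (the surrounding command is illustrative, not an
 * additional API):
 *
 *	struct hci_cp_le_set_adv_param cp;
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, true,
 *				      &own_addr_type) < 0)
 *		return;
 *	memset(&cp, 0, sizeof(cp));
 *	cp.own_address_type = own_addr_type;
 *	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
 */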
1532 
1533 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1534 {
1535         struct bdaddr_list *b;
1536 
1537         list_for_each_entry(b, &hdev->whitelist, list) {
1538                 struct hci_conn *conn;
1539 
1540                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1541                 if (!conn)
1542                         return true;
1543 
1544                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1545                         return true;
1546         }
1547 
1548         return false;
1549 }
1550 
1551 void __hci_req_update_scan(struct hci_request *req)
1552 {
1553         struct hci_dev *hdev = req->hdev;
1554         u8 scan;
1555 
1556         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1557                 return;
1558 
1559         if (!hdev_is_powered(hdev))
1560                 return;
1561 
1562         if (mgmt_powering_down(hdev))
1563                 return;
1564 
1565         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1566             disconnected_whitelist_entries(hdev))
1567                 scan = SCAN_PAGE;
1568         else
1569                 scan = SCAN_DISABLED;
1570 
1571         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1572                 scan |= SCAN_INQUIRY;
1573 
1574         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1575             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1576                 return;
1577 
1578         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1579 }
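
/* The value written above is the bitmask defined for the Write Scan
 * Enable command in the Bluetooth core specification: SCAN_INQUIRY
 * (0x01) enables inquiry scan (discoverable) and SCAN_PAGE (0x02)
 * enables page scan (connectable). A device that is both ends up
 * writing:
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;	(0x03)
 */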
1580 
1581 static int update_scan(struct hci_request *req, unsigned long opt)
1582 {
1583         hci_dev_lock(req->hdev);
1584         __hci_req_update_scan(req);
1585         hci_dev_unlock(req->hdev);
1586         return 0;
1587 }
1588 
1589 static void scan_update_work(struct work_struct *work)
1590 {
1591         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1592 
1593         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1594 }
1595 
1596 static int connectable_update(struct hci_request *req, unsigned long opt)
1597 {
1598         struct hci_dev *hdev = req->hdev;
1599 
1600         hci_dev_lock(hdev);
1601 
1602         __hci_req_update_scan(req);
1603 
1604         /* If BR/EDR is not enabled and we disable advertising as a
1605          * by-product of disabling connectable, we need to update the
1606          * advertising flags.
1607          */
1608         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1609                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1610 
1611         /* Update the advertising parameters if necessary */
1612         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1613             !list_empty(&hdev->adv_instances))
1614                 __hci_req_enable_advertising(req);
1615 
1616         __hci_update_background_scan(req);
1617 
1618         hci_dev_unlock(hdev);
1619 
1620         return 0;
1621 }
1622 
1623 static void connectable_update_work(struct work_struct *work)
1624 {
1625         struct hci_dev *hdev = container_of(work, struct hci_dev,
1626                                             connectable_update);
1627         u8 status;
1628 
1629         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1630         mgmt_set_connectable_complete(hdev, status);
1631 }
1632 
1633 static u8 get_service_classes(struct hci_dev *hdev)
1634 {
1635         struct bt_uuid *uuid;
1636         u8 val = 0;
1637 
1638         list_for_each_entry(uuid, &hdev->uuids, list)
1639                 val |= uuid->svc_hint;
1640 
1641         return val;
1642 }
1643 
1644 void __hci_req_update_class(struct hci_request *req)
1645 {
1646         struct hci_dev *hdev = req->hdev;
1647         u8 cod[3];
1648 
1649         BT_DBG("%s", hdev->name);
1650 
1651         if (!hdev_is_powered(hdev))
1652                 return;
1653 
1654         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1655                 return;
1656 
1657         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1658                 return;
1659 
1660         cod[0] = hdev->minor_class;
1661         cod[1] = hdev->major_class;
1662         cod[2] = get_service_classes(hdev);
1663 
1664         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1665                 cod[1] |= 0x20;
1666 
1667         if (memcmp(cod, hdev->dev_class, 3) == 0)
1668                 return;
1669 
1670         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1671 }
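
/* For reference: the Class of Device is transmitted little-endian, so
 * cod[0] carries the minor device class, cod[1] the major device
 * class, and cod[2] the upper service-class bits. The 0x20 OR-ed into
 * cod[1] above sets CoD bit 13, the Limited Discoverable Mode service
 * class bit from the Bluetooth core specification.
 */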
1672 
1673 static void write_iac(struct hci_request *req)
1674 {
1675         struct hci_dev *hdev = req->hdev;
1676         struct hci_cp_write_current_iac_lap cp;
1677 
1678         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1679                 return;
1680 
1681         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1682                 /* Limited discoverable mode */
1683                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1684                 cp.iac_lap[0] = 0x00;   /* LIAC */
1685                 cp.iac_lap[1] = 0x8b;
1686                 cp.iac_lap[2] = 0x9e;
1687                 cp.iac_lap[3] = 0x33;   /* GIAC */
1688                 cp.iac_lap[4] = 0x8b;
1689                 cp.iac_lap[5] = 0x9e;
1690         } else {
1691                 /* General discoverable mode */
1692                 cp.num_iac = 1;
1693                 cp.iac_lap[0] = 0x33;   /* GIAC */
1694                 cp.iac_lap[1] = 0x8b;
1695                 cp.iac_lap[2] = 0x9e;
1696         }
1697 
1698         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1699                     (cp.num_iac * 3) + 1, &cp);
1700 }
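
/* The LAPs above are the assigned inquiry access codes transmitted
 * LSB first: GIAC = 0x9e8b33 ({0x33, 0x8b, 0x9e}) and LIAC = 0x9e8b00
 * ({0x00, 0x8b, 0x9e}). bredr_inquiry() below uses the same two
 * constants when building the Inquiry command.
 */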
1701 
1702 static int discoverable_update(struct hci_request *req, unsigned long opt)
1703 {
1704         struct hci_dev *hdev = req->hdev;
1705 
1706         hci_dev_lock(hdev);
1707 
1708         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1709                 write_iac(req);
1710                 __hci_req_update_scan(req);
1711                 __hci_req_update_class(req);
1712         }
1713 
1714         /* Advertising instances don't use the global discoverable setting, so
1715          * only update AD if advertising was enabled using Set Advertising.
1716          */
1717         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1718                 __hci_req_update_adv_data(req, 0x00);
1719 
1720                 /* Discoverable mode affects the local advertising
1721                  * address in limited privacy mode.
1722                  */
1723                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1724                         __hci_req_enable_advertising(req);
1725         }
1726 
1727         hci_dev_unlock(hdev);
1728 
1729         return 0;
1730 }
1731 
1732 static void discoverable_update_work(struct work_struct *work)
1733 {
1734         struct hci_dev *hdev = container_of(work, struct hci_dev,
1735                                             discoverable_update);
1736         u8 status;
1737 
1738         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1739         mgmt_set_discoverable_complete(hdev, status);
1740 }
1741 
1742 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1743                       u8 reason)
1744 {
1745         switch (conn->state) {
1746         case BT_CONNECTED:
1747         case BT_CONFIG:
1748                 if (conn->type == AMP_LINK) {
1749                         struct hci_cp_disconn_phy_link cp;
1750 
1751                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1752                         cp.reason = reason;
1753                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1754                                     &cp);
1755                 } else {
1756                         struct hci_cp_disconnect dc;
1757 
1758                         dc.handle = cpu_to_le16(conn->handle);
1759                         dc.reason = reason;
1760                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1761                 }
1762 
1763                 conn->state = BT_DISCONN;
1764 
1765                 break;
1766         case BT_CONNECT:
1767                 if (conn->type == LE_LINK) {
1768                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1769                                 break;
1770                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1771                                     0, NULL);
1772                 } else if (conn->type == ACL_LINK) {
1773                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1774                                 break;
1775                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1776                                     6, &conn->dst);
1777                 }
1778                 break;
1779         case BT_CONNECT2:
1780                 if (conn->type == ACL_LINK) {
1781                         struct hci_cp_reject_conn_req rej;
1782 
1783                         bacpy(&rej.bdaddr, &conn->dst);
1784                         rej.reason = reason;
1785 
1786                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1787                                     sizeof(rej), &rej);
1788                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1789                         struct hci_cp_reject_sync_conn_req rej;
1790 
1791                         bacpy(&rej.bdaddr, &conn->dst);
1792 
1793                         /* SCO rejection has its own limited set of
1794                          * allowed error values (0x0D-0x0F), which isn't
1795                          * compatible with most values passed to this
1796                          * function. To be safe, hard-code one of the
1797                          * values that is suitable for SCO.
1798                          */
1799                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1800 
1801                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1802                                     sizeof(rej), &rej);
1803                 }
1804                 break;
1805         default:
1806                 conn->state = BT_CLOSED;
1807                 break;
1808         }
1809 }
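
/* Note that __hci_abort_conn() may legitimately queue no commands: an
 * LE connection in BT_CONNECT that is still in its HCI_CONN_SCANNING
 * phase has no controller state to tear down yet, so the request
 * stays empty and running it returns -ENODATA (handled in
 * hci_abort_conn() below).
 */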
1810 
1811 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1812 {
1813         if (status)
1814                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1815 }
1816 
1817 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1818 {
1819         struct hci_request req;
1820         int err;
1821 
1822         hci_req_init(&req, conn->hdev);
1823 
1824         __hci_abort_conn(&req, conn, reason);
1825 
1826         err = hci_req_run(&req, abort_conn_complete);
1827         if (err && err != -ENODATA) {
1828                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
1829                 return err;
1830         }
1831 
1832         return 0;
1833 }
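
/* The function above also serves as a compact template for the
 * asynchronous request pattern used throughout this file (a sketch;
 * opcode, plen, param and the callback are placeholders):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, opcode, plen, param);
 *	err = hci_req_run(&req, complete_callback);
 */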
1834 
1835 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1836 {
1837         hci_dev_lock(req->hdev);
1838         __hci_update_background_scan(req);
1839         hci_dev_unlock(req->hdev);
1840         return 0;
1841 }
1842 
1843 static void bg_scan_update(struct work_struct *work)
1844 {
1845         struct hci_dev *hdev = container_of(work, struct hci_dev,
1846                                             bg_scan_update);
1847         struct hci_conn *conn;
1848         u8 status;
1849         int err;
1850 
1851         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1852         if (!err)
1853                 return;
1854 
1855         hci_dev_lock(hdev);
1856 
1857         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1858         if (conn)
1859                 hci_le_conn_failed(conn, status);
1860 
1861         hci_dev_unlock(hdev);
1862 }
1863 
1864 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1865 {
1866         hci_req_add_le_scan_disable(req);
1867         return 0;
1868 }
1869 
1870 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1871 {
1872         u8 length = opt;
1873         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1874         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1875         struct hci_cp_inquiry cp;
1876 
1877         BT_DBG("%s", req->hdev->name);
1878 
1879         hci_dev_lock(req->hdev);
1880         hci_inquiry_cache_flush(req->hdev);
1881         hci_dev_unlock(req->hdev);
1882 
1883         memset(&cp, 0, sizeof(cp));
1884 
1885         if (req->hdev->discovery.limited)
1886                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1887         else
1888                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1889 
1890         cp.length = length;
1891 
1892         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1893 
1894         return 0;
1895 }
1896 
1897 static void le_scan_disable_work(struct work_struct *work)
1898 {
1899         struct hci_dev *hdev = container_of(work, struct hci_dev,
1900                                             le_scan_disable.work);
1901         u8 status;
1902 
1903         BT_DBG("%s", hdev->name);
1904 
1905         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1906                 return;
1907 
1908         cancel_delayed_work(&hdev->le_scan_restart);
1909 
1910         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1911         if (status) {
1912                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
1913                            status);
1914                 return;
1915         }
1916 
1917         hdev->discovery.scan_start = 0;
1918 
1919         /* If we were running an LE-only scan, change the discovery state.
1920          * If we were running LE and BR/EDR inquiry simultaneously and
1921          * BR/EDR inquiry has already finished, stop discovery; otherwise
1922          * BR/EDR inquiry will stop discovery when it finishes. If we are
1923          * about to resolve a remote device name, do not change the
1924          * discovery state.
1925          */
1926 
1927         if (hdev->discovery.type == DISCOV_TYPE_LE)
1928                 goto discov_stopped;
1929 
1930         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1931                 return;
1932 
1933         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1934                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1935                     hdev->discovery.state != DISCOVERY_RESOLVING)
1936                         goto discov_stopped;
1937 
1938                 return;
1939         }
1940 
1941         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1942                      HCI_CMD_TIMEOUT, &status);
1943         if (status) {
1944                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
1945                 goto discov_stopped;
1946         }
1947 
1948         return;
1949 
1950 discov_stopped:
1951         hci_dev_lock(hdev);
1952         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1953         hci_dev_unlock(hdev);
1954 }
1955 
1956 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1957 {
1958         struct hci_dev *hdev = req->hdev;
1959         struct hci_cp_le_set_scan_enable cp;
1960 
1961         /* If the controller is not scanning, we are done. */
1962         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1963                 return 0;
1964 
1965         hci_req_add_le_scan_disable(req);
1966 
1967         memset(&cp, 0, sizeof(cp));
1968         cp.enable = LE_SCAN_ENABLE;
1969         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1970         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1971 
1972         return 0;
1973 }
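
/* The disable/enable pair above exists to reset the controller's
 * duplicate filter: on controllers with
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER, a long-running scan would
 * otherwise suppress repeated advertising reports for the rest of the
 * discovery phase (see le_scan_restart_work() below).
 */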
1974 
1975 static void le_scan_restart_work(struct work_struct *work)
1976 {
1977         struct hci_dev *hdev = container_of(work, struct hci_dev,
1978                                             le_scan_restart.work);
1979         unsigned long timeout, duration, scan_start, now;
1980         u8 status;
1981 
1982         BT_DBG("%s", hdev->name);
1983 
1984         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1985         if (status) {
1986                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
1987                            status);
1988                 return;
1989         }
1990 
1991         hci_dev_lock(hdev);
1992 
1993         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1994             !hdev->discovery.scan_start)
1995                 goto unlock;
1996 
1997         /* When the scan was started, hdev->le_scan_disable was queued to
1998          * run 'duration' after scan_start. During the scan restart that
1999          * work was canceled, so queue it again with the remaining
2000          * timeout to make sure the scan does not run indefinitely.
2001          */
2002         duration = hdev->discovery.scan_duration;
2003         scan_start = hdev->discovery.scan_start;
2004         now = jiffies;
2005         if (now - scan_start <= duration) {
2006                 int elapsed;
2007 
2008                 if (now >= scan_start)
2009                         elapsed = now - scan_start;
2010                 else
2011                         elapsed = ULONG_MAX - scan_start + now;
2012 
2013                 timeout = duration - elapsed;
2014         } else {
2015                 timeout = 0;
2016         }
2017 
2018         queue_delayed_work(hdev->req_workqueue,
2019                            &hdev->le_scan_disable, timeout);
2020 
2021 unlock:
2022         hci_dev_unlock(hdev);
2023 }
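
/* A worked example for the wraparound branch above, with illustrative
 * numbers: duration = 1000 jiffies, scan_start = ULONG_MAX - 99 and a
 * post-wrap now = 100 give
 *
 *	elapsed = ULONG_MAX - scan_start + now = 99 + 100 = 199
 *	timeout = duration - elapsed = 801 jiffies
 *
 * so the le_scan_disable work still fires roughly when originally
 * scheduled.
 */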
2024 
2025 static int active_scan(struct hci_request *req, unsigned long opt)
2026 {
2027         uint16_t interval = opt;
2028         struct hci_dev *hdev = req->hdev;
2029         struct hci_cp_le_set_scan_param param_cp;
2030         struct hci_cp_le_set_scan_enable enable_cp;
2031         u8 own_addr_type;
2032         int err;
2033 
2034         BT_DBG("%s", hdev->name);
2035 
2036         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2037                 hci_dev_lock(hdev);
2038 
2039                 /* Don't let discovery abort an outgoing connection attempt
2040                  * that's using directed advertising.
2041                  */
2042                 if (hci_lookup_le_connect(hdev)) {
2043                         hci_dev_unlock(hdev);
2044                         return -EBUSY;
2045                 }
2046 
2047                 cancel_adv_timeout(hdev);
2048                 hci_dev_unlock(hdev);
2049 
2050                 __hci_req_disable_advertising(req);
2051         }
2052 
2053         /* If the controller is scanning, it means background scanning
2054          * is running. Temporarily stop it in order to set the
2055          * discovery scan parameters.
2056          */
2057         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2058                 hci_req_add_le_scan_disable(req);
2059 
2060         /* All active scans will be done with either a resolvable private
2061          * address (when the privacy feature has been enabled) or a
2062          * non-resolvable private address.
2063          */
2064         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2065                                         &own_addr_type);
2066         if (err < 0)
2067                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2068 
2069         memset(&param_cp, 0, sizeof(param_cp));
2070         param_cp.type = LE_SCAN_ACTIVE;
2071         param_cp.interval = cpu_to_le16(interval);
2072         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2073         param_cp.own_address_type = own_addr_type;
2074 
2075         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2076                     &param_cp);
2077 
2078         memset(&enable_cp, 0, sizeof(enable_cp));
2079         enable_cp.enable = LE_SCAN_ENABLE;
2080         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2081 
2082         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2083                     &enable_cp);
2084 
2085         return 0;
2086 }
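
/* The interval and window above are in the 0.625 ms units of the LE
 * Set Scan Parameters command, so an illustrative interval of 0x0060
 * corresponds to 96 * 0.625 = 60 ms. Passing opt through as the
 * interval lets start_discovery() below double it for simultaneous
 * discovery.
 */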
2087 
2088 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2089 {
2090         int err;
2091 
2092         BT_DBG("%s", req->hdev->name);
2093 
2094         err = active_scan(req, opt);
2095         if (err)
2096                 return err;
2097 
2098         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2099 }
2100 
2101 static void start_discovery(struct hci_dev *hdev, u8 *status)
2102 {
2103         unsigned long timeout;
2104 
2105         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2106 
2107         switch (hdev->discovery.type) {
2108         case DISCOV_TYPE_BREDR:
2109                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2110                         hci_req_sync(hdev, bredr_inquiry,
2111                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2112                                      status);
2113                 return;
2114         case DISCOV_TYPE_INTERLEAVED:
2115                 /* When running simultaneous discovery, the LE scanning time
2116                  * should occupy the whole discovery time since BR/EDR inquiry
2117                  * and LE scanning are scheduled by the controller.
2118                  *
2119                  * For interleaved discovery, in comparison, BR/EDR inquiry
2120                  * and LE scanning are done sequentially with separate
2121                  * timeouts.
2122                  */
2123                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2124                              &hdev->quirks)) {
2125                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2126                         /* During simultaneous discovery, we double LE scan
2127                          * interval. We must leave some time for the controller
2128                          * to do BR/EDR inquiry.
2129                          */
2130                         hci_req_sync(hdev, interleaved_discov,
2131                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2132                                      status);
2133                         break;
2134                 }
2135 
2136                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2137                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2138                              HCI_CMD_TIMEOUT, status);
2139                 break;
2140         case DISCOV_TYPE_LE:
2141                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2142                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2143                              HCI_CMD_TIMEOUT, status);
2144                 break;
2145         default:
2146                 *status = HCI_ERROR_UNSPECIFIED;
2147                 return;
2148         }
2149 
2150         if (*status)
2151                 return;
2152 
2153         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2154 
2155         /* When service discovery is used and the controller has a
2156          * strict duplicate filter, it is important to remember the
2157          * start and duration of the scan. This is required for
2158          * restarting scanning during the discovery phase.
2159          */
2160         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2161                      hdev->discovery.result_filtering) {
2162                 hdev->discovery.scan_start = jiffies;
2163                 hdev->discovery.scan_duration = timeout;
2164         }
2165 
2166         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2167                            timeout);
2168 }
2169 
2170 bool hci_req_stop_discovery(struct hci_request *req)
2171 {
2172         struct hci_dev *hdev = req->hdev;
2173         struct discovery_state *d = &hdev->discovery;
2174         struct hci_cp_remote_name_req_cancel cp;
2175         struct inquiry_entry *e;
2176         bool ret = false;
2177 
2178         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2179 
2180         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2181                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2182                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2183 
2184                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2185                         cancel_delayed_work(&hdev->le_scan_disable);
2186                         hci_req_add_le_scan_disable(req);
2187                 }
2188 
2189                 ret = true;
2190         } else {
2191                 /* Passive scanning */
2192                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2193                         hci_req_add_le_scan_disable(req);
2194                         ret = true;
2195                 }
2196         }
2197 
2198         /* No further actions needed for LE-only discovery */
2199         if (d->type == DISCOV_TYPE_LE)
2200                 return ret;
2201 
2202         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2203                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2204                                                      NAME_PENDING);
2205                 if (!e)
2206                         return ret;
2207 
2208                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2209                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2210                             &cp);
2211                 ret = true;
2212         }
2213 
2214         return ret;
2215 }
2216 
2217 static int stop_discovery(struct hci_request *req, unsigned long opt)
2218 {
2219         hci_dev_lock(req->hdev);
2220         hci_req_stop_discovery(req);
2221         hci_dev_unlock(req->hdev);
2222 
2223         return 0;
2224 }
2225 
2226 static void discov_update(struct work_struct *work)
2227 {
2228         struct hci_dev *hdev = container_of(work, struct hci_dev,
2229                                             discov_update);
2230         u8 status = 0;
2231 
2232         switch (hdev->discovery.state) {
2233         case DISCOVERY_STARTING:
2234                 start_discovery(hdev, &status);
2235                 mgmt_start_discovery_complete(hdev, status);
2236                 if (status)
2237                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2238                 else
2239                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2240                 break;
2241         case DISCOVERY_STOPPING:
2242                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2243                 mgmt_stop_discovery_complete(hdev, status);
2244                 if (!status)
2245                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2246                 break;
2247         case DISCOVERY_STOPPED:
2248         default:
2249                 return;
2250         }
2251 }
2252 
2253 static void discov_off(struct work_struct *work)
2254 {
2255         struct hci_dev *hdev = container_of(work, struct hci_dev,
2256                                             discov_off.work);
2257 
2258         BT_DBG("%s", hdev->name);
2259 
2260         hci_dev_lock(hdev);
2261 
2262         /* When the discoverable timeout triggers, just make sure the
2263          * limited discoverable flag is cleared. Even for a timeout
2264          * triggered from general discoverable mode, it is safe to
2265          * clear the flag unconditionally.
2266          */
2267         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2268         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2269         hdev->discov_timeout = 0;
2270 
2271         hci_dev_unlock(hdev);
2272 
2273         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2274         mgmt_new_settings(hdev);
2275 }
2276 
2277 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2278 {
2279         struct hci_dev *hdev = req->hdev;
2280         u8 link_sec;
2281 
2282         hci_dev_lock(hdev);
2283 
2284         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2285             !lmp_host_ssp_capable(hdev)) {
2286                 u8 mode = 0x01;
2287 
2288                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2289 
2290                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2291                         u8 support = 0x01;
2292 
2293                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2294                                     sizeof(support), &support);
2295                 }
2296         }
2297 
2298         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2299             lmp_bredr_capable(hdev)) {
2300                 struct hci_cp_write_le_host_supported cp;
2301 
2302                 cp.le = 0x01;
2303                 cp.simul = 0x00;
2304 
2305                 /* Check first if we already have the right
2306                  * host state (host features set)
2307                  */
2308                 if (cp.le != lmp_host_le_capable(hdev) ||
2309                     cp.simul != lmp_host_le_br_capable(hdev))
2310                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2311                                     sizeof(cp), &cp);
2312         }
2313 
2314         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2315                 /* Make sure the controller has a good default for
2316                  * advertising data. This also applies to the case
2317                  * where BR/EDR was toggled during the AUTO_OFF phase.
2318                  */
2319                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2320                     list_empty(&hdev->adv_instances)) {
2321                         __hci_req_update_adv_data(req, 0x00);
2322                         __hci_req_update_scan_rsp_data(req, 0x00);
2323 
2324                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2325                                 __hci_req_enable_advertising(req);
2326                 } else if (!list_empty(&hdev->adv_instances)) {
2327                         struct adv_info *adv_instance;
2328 
2329                         adv_instance = list_first_entry(&hdev->adv_instances,
2330                                                         struct adv_info, list);
2331                         __hci_req_schedule_adv_instance(req,
2332                                                         adv_instance->instance,
2333                                                         true);
2334                 }
2335         }
2336 
2337         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2338         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2339                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2340                             sizeof(link_sec), &link_sec);
2341 
2342         if (lmp_bredr_capable(hdev)) {
2343                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2344                         __hci_req_write_fast_connectable(req, true);
2345                 else
2346                         __hci_req_write_fast_connectable(req, false);
2347                 __hci_req_update_scan(req);
2348                 __hci_req_update_class(req);
2349                 __hci_req_update_name(req);
2350                 __hci_req_update_eir(req);
2351         }
2352 
2353         hci_dev_unlock(hdev);
2354         return 0;
2355 }
2356 
2357 int __hci_req_hci_power_on(struct hci_dev *hdev)
2358 {
2359         /* Register the available SMP channels (BR/EDR and LE) only when
2360          * successfully powering on the controller. This late
2361          * registration is required so that LE SMP can clearly decide
2362          * whether the public address or the static address is used.
2363          */
2364         smp_register(hdev);
2365 
2366         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2367                               NULL);
2368 }
2369 
2370 void hci_request_setup(struct hci_dev *hdev)
2371 {
2372         INIT_WORK(&hdev->discov_update, discov_update);
2373         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2374         INIT_WORK(&hdev->scan_update, scan_update_work);
2375         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2376         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2377         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2378         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2379         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2380         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2381 }
2382 
2383 void hci_request_cancel_all(struct hci_dev *hdev)
2384 {
2385         hci_req_sync_cancel(hdev, ENODEV);
2386 
2387         cancel_work_sync(&hdev->discov_update);
2388         cancel_work_sync(&hdev->bg_scan_update);
2389         cancel_work_sync(&hdev->scan_update);
2390         cancel_work_sync(&hdev->connectable_update);
2391         cancel_work_sync(&hdev->discoverable_update);
2392         cancel_delayed_work_sync(&hdev->discov_off);
2393         cancel_delayed_work_sync(&hdev->le_scan_disable);
2394         cancel_delayed_work_sync(&hdev->le_scan_restart);
2395 
2396         if (hdev->adv_instance_timeout) {
2397                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2398                 hdev->adv_instance_timeout = 0;
2399         }
2400 }
2401 
