/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
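
/* Usage sketch (added for illustration, not part of the original file):
 * a caller builds a request and runs it asynchronously. The completion
 * callback name below is hypothetical.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 *	if (err < 0)
 *		return err;	(nothing was queued)
 */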

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
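
/* Usage sketch (added for illustration, not part of the original file):
 * send one command synchronously and consume the returned event skb.
 * HCI_INIT_TIMEOUT is the timeout commonly used for setup-time commands.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	parse skb->data here, then:
 *	kfree_skb(skb);
 */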

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
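
/* Usage sketch (added for illustration, not part of the original file):
 * hci_req_sync() takes a request-builder callback; "opt" is an opaque
 * value passed through to it. The builder below is hypothetical.
 *
 *	static int update_eir_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_eir(req);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, update_eir_req, 0, HCI_CMD_TIMEOUT, NULL);
 */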

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
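
/* Note (added): page scan interval and window are expressed in units of
 * 0.625 ms, so the values above work out to
 *
 *	interval 0x0100 * 0.625 ms = 160 ms (fast connectable)
 *	interval 0x0800 * 0.625 ms = 1.28 s (default)
 *	window   0x0012 * 0.625 ms = 11.25 ms
 */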

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
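
/* Worked example (added): with two 16-bit UUIDs 0x110A and 0x110B in
 * hdev->uuids, create_uuid16_list() emits the EIR field
 *
 *	05 03 0a 11 0b 11
 *
 * i.e. length 5, type EIR_UUID16_ALL (0x03), then each UUID little-endian.
 */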

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
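
/* Note (added): the value returned above is the LE scan filter_policy:
 * 0x00 accepts all advertising packets, 0x01 accepts only packets from
 * white list devices. When LE privacy with extended scanner filter
 * policies is in use, hci_req_add_le_passive_scan() below ORs in 0x02 so
 * that directed advertising addressed to our RPA is reported as well.
 */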

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses, i.e.
	 * LE privacy is enabled, controllers that support the Extended
	 * Scanner Filter Policies feature can additionally handle
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
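
/* Note (added): the le_states[] checks above index the supported LE
 * states bitmask byte-wise, i.e. bit N is le_states[N / 8] & (1 << (N % 8)).
 * For example bit 20 maps to le_states[2] & 0x10 and bit 38 maps to
 * le_states[4] & 0x40, matching the comments in is_advertising_allowed().
 */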

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);
	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
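
/* Worked example (added): for a general-discoverable device without BR/EDR
 * the function above starts the advertising data with the AD structure
 *
 *	02 01 06
 *
 * i.e. length 2, type EIR_FLAGS (0x01) and flags LE_AD_GENERAL (0x02) |
 * LE_AD_NO_BREDR (0x04).
 */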

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
1507                         get_random_bytes(&nrpa, 6);
1508                         nrpa.b[5] &= 0x3f;
1509
1510                         /* The non-resolvable private address shall not be
1511                          * equal to the public address.
1512                          */
1513                         if (bacmp(&hdev->bdaddr, &nrpa))
1514                                 break;
1515                 }
1516
1517                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1518                 bacpy(rand_addr, &nrpa);
1519
1520                 return 0;
1521         }
1522
1523         /* No privacy so use a public address. */
1524         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1525
1526         return 0;
1527 }
1528
1529 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1530 {
1531         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1532 }
1533
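/* Queue the LE Set Extended Advertising Parameters command for the
 * given instance (0x00 is the default instance without an adv_info
 * entry) and, if the advertising set needs a different random address,
 * a LE Set Advertising Set Random Address command as well.
 */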
1534 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1535 {
1536         struct hci_cp_le_set_ext_adv_params cp;
1537         struct hci_dev *hdev = req->hdev;
1538         bool connectable;
1539         u32 flags;
1540         bdaddr_t random_addr;
1541         u8 own_addr_type;
1542         int err;
1543         struct adv_info *adv_instance;
1544         bool secondary_adv;
1545         /* In the ext adv set param command the interval is 3 octets */
1546         const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1547
1548         if (instance > 0) {
1549                 adv_instance = hci_find_adv_instance(hdev, instance);
1550                 if (!adv_instance)
1551                         return -EINVAL;
1552         } else {
1553                 adv_instance = NULL;
1554         }
1555
1556         flags = get_adv_instance_flags(hdev, instance);
1557
1558         /* If the "connectable" instance flag was not set, then choose between
1559          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1560          */
1561         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1562                       mgmt_get_connectable(hdev);
1563
1564         if (!is_advertising_allowed(hdev, connectable))
1565                 return -EPERM;
1566
1567         /* Set require_privacy to true only when non-connectable
1568          * advertising is used. In that case it is fine to use a
1569          * non-resolvable private address.
1570          */
1571         err = hci_get_random_address(hdev, !connectable,
1572                                      adv_use_rpa(hdev, flags), adv_instance,
1573                                      &own_addr_type, &random_addr);
1574         if (err < 0)
1575                 return err;
1576
1577         memset(&cp, 0, sizeof(cp));
1578
1579         memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1580         memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1581
1582         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1583
1584         if (connectable) {
1585                 if (secondary_adv)
1586                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1587                 else
1588                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1589         } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1590                 if (secondary_adv)
1591                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1592                 else
1593                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1594         } else {
1595                 if (secondary_adv)
1596                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1597                 else
1598                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1599         }
1600
1601         cp.own_addr_type = own_addr_type;
1602         cp.channel_map = hdev->le_adv_channel_map;
1603         cp.tx_power = 127;
1604         cp.handle = instance;
1605
1606         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1607                 cp.primary_phy = HCI_ADV_PHY_1M;
1608                 cp.secondary_phy = HCI_ADV_PHY_2M;
1609         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1610                 cp.primary_phy = HCI_ADV_PHY_CODED;
1611                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1612         } else {
1613                 /* In all other cases use 1M */
1614                 cp.primary_phy = HCI_ADV_PHY_1M;
1615                 cp.secondary_phy = HCI_ADV_PHY_1M;
1616         }
1617
1618         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1619
1620         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1621             bacmp(&random_addr, BDADDR_ANY)) {
1622                 struct hci_cp_le_set_adv_set_rand_addr cp;
1623
1624                 /* Check if the random address needs to be updated */
1625                 if (adv_instance) {
1626                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1627                                 return 0;
1628                 } else {
1629                         if (!bacmp(&random_addr, &hdev->random_addr))
1630                                 return 0;
1631                 }
1632
1633                 memset(&cp, 0, sizeof(cp));
1634
1635                 cp.handle = instance;
1636                 bacpy(&cp.bdaddr, &random_addr);
1637
1638                 hci_req_add(req,
1639                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1640                             sizeof(cp), &cp);
1641         }
1642
1643         return 0;
1644 }
1645
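/* Queue the LE Set Extended Advertising Enable command for a single
 * advertising set. The instance duration is handed to the controller
 * (in 10 ms units) since the controller schedules extended advertising
 * instances itself.
 */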
1646 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1647 {
1648         struct hci_dev *hdev = req->hdev;
1649         struct hci_cp_le_set_ext_adv_enable *cp;
1650         struct hci_cp_ext_adv_set *adv_set;
1651         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1652         struct adv_info *adv_instance;
1653
1654         if (instance > 0) {
1655                 adv_instance = hci_find_adv_instance(hdev, instance);
1656                 if (!adv_instance)
1657                         return -EINVAL;
1658         } else {
1659                 adv_instance = NULL;
1660         }
1661
1662         cp = (void *) data;
1663         adv_set = (void *) cp->data;
1664
1665         memset(cp, 0, sizeof(*cp));
1666
1667         cp->enable = 0x01;
1668         cp->num_of_sets = 0x01;
1669
1670         memset(adv_set, 0, sizeof(*adv_set));
1671
1672         adv_set->handle = instance;
1673
1674         /* Set the duration per instance since the controller is
1675          * responsible for scheduling it.
1676          */
1677         if (adv_instance && adv_instance->duration) {
1678                 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1679
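                /* adv_instance->duration is in seconds; the intermediate
                 * millisecond value is assumed to stay within u16 range,
                 * i.e. instance durations are expected to be well below
                 * ~65 seconds here.
                 */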
1680                 /* Time = N * 10 ms */
1681                 adv_set->duration = cpu_to_le16(duration / 10);
1682         }
1683
1684         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1685                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1686                     data);
1687
1688         return 0;
1689 }
1690
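/* (Re)start extended advertising for an instance: disable any running
 * advertising first, then set up the advertising parameters and scan
 * response data before enabling the set again.
 */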
1691 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1692 {
1693         struct hci_dev *hdev = req->hdev;
1694         int err;
1695
1696         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1697                 __hci_req_disable_advertising(req);
1698
1699         err = __hci_req_setup_ext_adv_instance(req, instance);
1700         if (err < 0)
1701                 return err;
1702
1703         __hci_req_update_scan_rsp_data(req, instance);
1704         __hci_req_enable_ext_advertising(req, instance);
1705
1706         return 0;
1707 }
1708
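/* Make the given instance the current advertising instance and start
 * advertising it, capping the duration at the remaining lifetime of
 * the instance. For legacy advertising the expiry is driven by the
 * adv_instance_expire delayed work; extended advertising controllers
 * enforce the duration themselves.
 */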
1709 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1710                                     bool force)
1711 {
1712         struct hci_dev *hdev = req->hdev;
1713         struct adv_info *adv_instance = NULL;
1714         u16 timeout;
1715
1716         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1717             list_empty(&hdev->adv_instances))
1718                 return -EPERM;
1719
1720         if (hdev->adv_instance_timeout)
1721                 return -EBUSY;
1722
1723         adv_instance = hci_find_adv_instance(hdev, instance);
1724         if (!adv_instance)
1725                 return -ENOENT;
1726
1727         /* A zero timeout means unlimited advertising. As long as there is
1728          * only one instance, duration should be ignored. We still set a timeout
1729          * in case further instances are added later on.
1730          *
1731          * If the remaining lifetime of the instance is more than the duration
1732          * then the timeout corresponds to the duration, otherwise it will be
1733          * reduced to the remaining instance lifetime.
1734          */
1735         if (adv_instance->timeout == 0 ||
1736             adv_instance->duration <= adv_instance->remaining_time)
1737                 timeout = adv_instance->duration;
1738         else
1739                 timeout = adv_instance->remaining_time;
1740
1741         /* The remaining time is being reduced unless the instance is being
1742          * advertised without time limit.
1743          */
1744         if (adv_instance->timeout)
1745                 adv_instance->remaining_time =
1746                                 adv_instance->remaining_time - timeout;
1747
1748         /* Only use work for scheduling instances with legacy advertising */
1749         if (!ext_adv_capable(hdev)) {
1750                 hdev->adv_instance_timeout = timeout;
1751                 queue_delayed_work(hdev->req_workqueue,
1752                            &hdev->adv_instance_expire,
1753                            msecs_to_jiffies(timeout * 1000));
1754         }
1755
1756         /* If we're just re-scheduling the same instance again then do not
1757          * execute any HCI commands. This happens when a single instance is
1758          * being advertised.
1759          */
1760         if (!force && hdev->cur_adv_instance == instance &&
1761             hci_dev_test_flag(hdev, HCI_LE_ADV))
1762                 return 0;
1763
1764         hdev->cur_adv_instance = instance;
1765         if (ext_adv_capable(hdev)) {
1766                 __hci_req_start_ext_adv(req, instance);
1767         } else {
1768                 __hci_req_update_adv_data(req, instance);
1769                 __hci_req_update_scan_rsp_data(req, instance);
1770                 __hci_req_enable_advertising(req);
1771         }
1772
1773         return 0;
1774 }
1775
1776 static void cancel_adv_timeout(struct hci_dev *hdev)
1777 {
1778         if (hdev->adv_instance_timeout) {
1779                 hdev->adv_instance_timeout = 0;
1780                 cancel_delayed_work(&hdev->adv_instance_expire);
1781         }
1782 }
1783
1784 /* For a single instance:
1785  * - force == true: The instance will be removed even when its remaining
1786  *   lifetime is not zero.
1787  * - force == false: The instance will be deactivated but kept stored unless
1788  *   the remaining lifetime is zero.
1789  *
1790  * For instance == 0x00:
1791  * - force == true: All instances will be removed regardless of their timeout
1792  *   setting.
1793  * - force == false: Only instances that have a timeout will be removed.
1794  */
1795 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1796                                 struct hci_request *req, u8 instance,
1797                                 bool force)
1798 {
1799         struct adv_info *adv_instance, *n, *next_instance = NULL;
1800         int err;
1801         u8 rem_inst;
1802
1803         /* Cancel any timeout concerning the removed instance(s). */
1804         if (!instance || hdev->cur_adv_instance == instance)
1805                 cancel_adv_timeout(hdev);
1806
1807         /* Get the next instance to advertise BEFORE we remove
1808          * the current one. This can be the same instance again
1809          * if there is only one instance.
1810          */
1811         if (instance && hdev->cur_adv_instance == instance)
1812                 next_instance = hci_get_next_instance(hdev, instance);
1813
1814         if (instance == 0x00) {
1815                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1816                                          list) {
1817                         if (!(force || adv_instance->timeout))
1818                                 continue;
1819
1820                         rem_inst = adv_instance->instance;
1821                         err = hci_remove_adv_instance(hdev, rem_inst);
1822                         if (!err)
1823                                 mgmt_advertising_removed(sk, hdev, rem_inst);
1824                 }
1825         } else {
1826                 adv_instance = hci_find_adv_instance(hdev, instance);
1827
1828                 if (force || (adv_instance && adv_instance->timeout &&
1829                               !adv_instance->remaining_time)) {
1830                         /* Don't advertise a removed instance. */
1831                         if (next_instance &&
1832                             next_instance->instance == instance)
1833                                 next_instance = NULL;
1834
1835                         err = hci_remove_adv_instance(hdev, instance);
1836                         if (!err)
1837                                 mgmt_advertising_removed(sk, hdev, instance);
1838                 }
1839         }
1840
1841         if (!req || !hdev_is_powered(hdev) ||
1842             hci_dev_test_flag(hdev, HCI_ADVERTISING))
1843                 return;
1844
1845         if (next_instance)
1846                 __hci_req_schedule_adv_instance(req, next_instance->instance,
1847                                                 false);
1848 }
1849
1850 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1851 {
1852         struct hci_dev *hdev = req->hdev;
1853
1854         /* If we're advertising or initiating an LE connection we can't
1855          * go ahead and change the random address at this time. This is
1856          * because the eventual initiator address used for the
1857          * subsequently created connection will be undefined (some
1858          * controllers use the new address and others the one we had
1859          * when the operation started).
1860          *
1861          * In this kind of scenario skip the update and let the random
1862          * address be updated at the next cycle.
1863          */
1864         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1865             hci_lookup_le_connect(hdev)) {
1866                 BT_DBG("Deferring random address update");
1867                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1868                 return;
1869         }
1870
1871         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1872 }
1873
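/* Select the own address type for the request and queue a LE Set
 * Random Address command whenever the random address (RPA,
 * non-resolvable private address or static address) has to change,
 * deferring the update while advertising or connecting is in progress.
 */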
1874 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1875                               bool use_rpa, u8 *own_addr_type)
1876 {
1877         struct hci_dev *hdev = req->hdev;
1878         int err;
1879
1880         /* If privacy is enabled use a resolvable private address. If
1881          * the current RPA has expired or something other than the
1882          * current RPA is in use, then generate a new one.
1883          */
1884         if (use_rpa) {
1885                 int to;
1886
1887                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1888
1889                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1890                     !bacmp(&hdev->random_addr, &hdev->rpa))
1891                         return 0;
1892
1893                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1894                 if (err < 0) {
1895                         bt_dev_err(hdev, "failed to generate new RPA");
1896                         return err;
1897                 }
1898
1899                 set_random_addr(req, &hdev->rpa);
1900
1901                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1902                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1903
1904                 return 0;
1905         }
1906
1907         /* If privacy is required but a resolvable private address cannot
1908          * be used, fall back to a non-resolvable private address. This is
1909          * useful for active scanning and non-connectable advertising.
1910          */
1911         if (require_privacy) {
1912                 bdaddr_t nrpa;
1913
1914                 while (true) {
1915                         /* The non-resolvable private address is generated
1916                          * from six random bytes with the two most significant
1917                          * bits cleared.
1918                          */
1919                         get_random_bytes(&nrpa, 6);
1920                         nrpa.b[5] &= 0x3f;
1921
1922                         /* The non-resolvable private address shall not be
1923                          * equal to the public address.
1924                          */
1925                         if (bacmp(&hdev->bdaddr, &nrpa))
1926                                 break;
1927                 }
1928
1929                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1930                 set_random_addr(req, &nrpa);
1931                 return 0;
1932         }
1933
1934         /* If use of a static address is forced or there is no public
1935          * address, use the static address as the random address (but
1936          * skip the HCI command if the current random address is already
1937          * the static one).
1938          *
1939          * In case BR/EDR has been disabled on a dual-mode controller
1940          * and a static address has been configured, then use that
1941          * address instead of the public BR/EDR address.
1942          */
1943         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1944             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1945             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1946              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1947                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1948                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1949                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1950                                     &hdev->static_addr);
1951                 return 0;
1952         }
1953
1954         /* Neither privacy nor static address is being used so use a
1955          * public address.
1956          */
1957         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1958
1959         return 0;
1960 }
1961
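/* Returns true if any device on the BR/EDR whitelist is currently
 * disconnected, in which case page scan has to stay enabled so that
 * the device can reconnect.
 */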
1962 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1963 {
1964         struct bdaddr_list *b;
1965
1966         list_for_each_entry(b, &hdev->whitelist, list) {
1967                 struct hci_conn *conn;
1968
1969                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1970                 if (!conn)
1971                         return true;
1972
1973                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1974                         return true;
1975         }
1976
1977         return false;
1978 }
1979
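/* Recompute the desired page and inquiry scan setting and queue a
 * Write Scan Enable command only if it differs from the current state
 * of the controller.
 */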
1980 void __hci_req_update_scan(struct hci_request *req)
1981 {
1982         struct hci_dev *hdev = req->hdev;
1983         u8 scan;
1984
1985         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1986                 return;
1987
1988         if (!hdev_is_powered(hdev))
1989                 return;
1990
1991         if (mgmt_powering_down(hdev))
1992                 return;
1993
1994         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1995             disconnected_whitelist_entries(hdev))
1996                 scan = SCAN_PAGE;
1997         else
1998                 scan = SCAN_DISABLED;
1999
2000         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2001                 scan |= SCAN_INQUIRY;
2002
2003         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2004             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2005                 return;
2006
2007         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2008 }
2009
2010 static int update_scan(struct hci_request *req, unsigned long opt)
2011 {
2012         hci_dev_lock(req->hdev);
2013         __hci_req_update_scan(req);
2014         hci_dev_unlock(req->hdev);
2015         return 0;
2016 }
2017
2018 static void scan_update_work(struct work_struct *work)
2019 {
2020         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2021
2022         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2023 }
2024
2025 static int connectable_update(struct hci_request *req, unsigned long opt)
2026 {
2027         struct hci_dev *hdev = req->hdev;
2028
2029         hci_dev_lock(hdev);
2030
2031         __hci_req_update_scan(req);
2032
2033         /* If BR/EDR is not enabled and we disable advertising as a
2034          * by-product of disabling connectable, we need to update the
2035          * advertising flags.
2036          */
2037         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2038                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2039
2040         /* Update the advertising parameters if necessary */
2041         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2042             !list_empty(&hdev->adv_instances)) {
2043                 if (ext_adv_capable(hdev))
2044                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2045                 else
2046                         __hci_req_enable_advertising(req);
2047         }
2048
2049         __hci_update_background_scan(req);
2050
2051         hci_dev_unlock(hdev);
2052
2053         return 0;
2054 }
2055
2056 static void connectable_update_work(struct work_struct *work)
2057 {
2058         struct hci_dev *hdev = container_of(work, struct hci_dev,
2059                                             connectable_update);
2060         u8 status;
2061
2062         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2063         mgmt_set_connectable_complete(hdev, status);
2064 }
2065
2066 static u8 get_service_classes(struct hci_dev *hdev)
2067 {
2068         struct bt_uuid *uuid;
2069         u8 val = 0;
2070
2071         list_for_each_entry(uuid, &hdev->uuids, list)
2072                 val |= uuid->svc_hint;
2073
2074         return val;
2075 }
2076
2077 void __hci_req_update_class(struct hci_request *req)
2078 {
2079         struct hci_dev *hdev = req->hdev;
2080         u8 cod[3];
2081
2082         BT_DBG("%s", hdev->name);
2083
2084         if (!hdev_is_powered(hdev))
2085                 return;
2086
2087         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2088                 return;
2089
2090         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2091                 return;
2092
2093         cod[0] = hdev->minor_class;
2094         cod[1] = hdev->major_class;
2095         cod[2] = get_service_classes(hdev);
2096
2097         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2098                 cod[1] |= 0x20;
2099
2100         if (memcmp(cod, hdev->dev_class, 3) == 0)
2101                 return;
2102
2103         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2104 }
2105
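/* The inquiry access codes below are 3-octet LAPs transmitted in
 * little-endian order: 0x9e8b00 is the Limited Inquiry Access Code
 * (LIAC) and 0x9e8b33 the General Inquiry Access Code (GIAC).
 */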
2106 static void write_iac(struct hci_request *req)
2107 {
2108         struct hci_dev *hdev = req->hdev;
2109         struct hci_cp_write_current_iac_lap cp;
2110
2111         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2112                 return;
2113
2114         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2115                 /* Limited discoverable mode */
2116                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2117                 cp.iac_lap[0] = 0x00;   /* LIAC */
2118                 cp.iac_lap[1] = 0x8b;
2119                 cp.iac_lap[2] = 0x9e;
2120                 cp.iac_lap[3] = 0x33;   /* GIAC */
2121                 cp.iac_lap[4] = 0x8b;
2122                 cp.iac_lap[5] = 0x9e;
2123         } else {
2124                 /* General discoverable mode */
2125                 cp.num_iac = 1;
2126                 cp.iac_lap[0] = 0x33;   /* GIAC */
2127                 cp.iac_lap[1] = 0x8b;
2128                 cp.iac_lap[2] = 0x9e;
2129         }
2130
2131         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2132                     (cp.num_iac * 3) + 1, &cp);
2133 }
2134
2135 static int discoverable_update(struct hci_request *req, unsigned long opt)
2136 {
2137         struct hci_dev *hdev = req->hdev;
2138
2139         hci_dev_lock(hdev);
2140
2141         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2142                 write_iac(req);
2143                 __hci_req_update_scan(req);
2144                 __hci_req_update_class(req);
2145         }
2146
2147         /* Advertising instances don't use the global discoverable setting, so
2148          * only update AD if advertising was enabled using Set Advertising.
2149          */
2150         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2151                 __hci_req_update_adv_data(req, 0x00);
2152
2153                 /* Discoverable mode affects the local advertising
2154                  * address in limited privacy mode.
2155                  */
2156                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2157                         if (ext_adv_capable(hdev))
2158                                 __hci_req_start_ext_adv(req, 0x00);
2159                         else
2160                                 __hci_req_enable_advertising(req);
2161                 }
2162         }
2163
2164         hci_dev_unlock(hdev);
2165
2166         return 0;
2167 }
2168
2169 static void discoverable_update_work(struct work_struct *work)
2170 {
2171         struct hci_dev *hdev = container_of(work, struct hci_dev,
2172                                             discoverable_update);
2173         u8 status;
2174
2175         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2176         mgmt_set_discoverable_complete(hdev, status);
2177 }
2178
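/* Queue the HCI commands required to tear down a connection based on
 * its current state: disconnect established links, cancel pending
 * connection attempts and reject incoming connection requests.
 */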
2179 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2180                       u8 reason)
2181 {
2182         switch (conn->state) {
2183         case BT_CONNECTED:
2184         case BT_CONFIG:
2185                 if (conn->type == AMP_LINK) {
2186                         struct hci_cp_disconn_phy_link cp;
2187
2188                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2189                         cp.reason = reason;
2190                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2191                                     &cp);
2192                 } else {
2193                         struct hci_cp_disconnect dc;
2194
2195                         dc.handle = cpu_to_le16(conn->handle);
2196                         dc.reason = reason;
2197                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2198                 }
2199
2200                 conn->state = BT_DISCONN;
2201
2202                 break;
2203         case BT_CONNECT:
2204                 if (conn->type == LE_LINK) {
2205                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2206                                 break;
2207                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2208                                     0, NULL);
2209                 } else if (conn->type == ACL_LINK) {
2210                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2211                                 break;
2212                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2213                                     6, &conn->dst);
2214                 }
2215                 break;
2216         case BT_CONNECT2:
2217                 if (conn->type == ACL_LINK) {
2218                         struct hci_cp_reject_conn_req rej;
2219
2220                         bacpy(&rej.bdaddr, &conn->dst);
2221                         rej.reason = reason;
2222
2223                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2224                                     sizeof(rej), &rej);
2225                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2226                         struct hci_cp_reject_sync_conn_req rej;
2227
2228                         bacpy(&rej.bdaddr, &conn->dst);
2229
2230                         /* SCO rejection has its own limited set of
2231                          * allowed error values (0x0D-0x0F) which isn't
2232                          * compatible with most values passed to this
2233                          * function. To be safe hard-code one of the
2234                          * values that's suitable for SCO.
2235                          */
2236                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2237
2238                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2239                                     sizeof(rej), &rej);
2240                 }
2241                 break;
2242         default:
2243                 conn->state = BT_CLOSED;
2244                 break;
2245         }
2246 }
2247
2248 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2249 {
2250         if (status)
2251                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2252 }
2253
2254 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2255 {
2256         struct hci_request req;
2257         int err;
2258
2259         hci_req_init(&req, conn->hdev);
2260
2261         __hci_abort_conn(&req, conn, reason);
2262
2263         err = hci_req_run(&req, abort_conn_complete);
2264         if (err && err != -ENODATA) {
2265                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2266                 return err;
2267         }
2268
2269         return 0;
2270 }
2271
2272 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2273 {
2274         hci_dev_lock(req->hdev);
2275         __hci_update_background_scan(req);
2276         hci_dev_unlock(req->hdev);
2277         return 0;
2278 }
2279
2280 static void bg_scan_update(struct work_struct *work)
2281 {
2282         struct hci_dev *hdev = container_of(work, struct hci_dev,
2283                                             bg_scan_update);
2284         struct hci_conn *conn;
2285         u8 status;
2286         int err;
2287
2288         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2289         if (!err)
2290                 return;
2291
2292         hci_dev_lock(hdev);
2293
2294         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2295         if (conn)
2296                 hci_le_conn_failed(conn, status);
2297
2298         hci_dev_unlock(hdev);
2299 }
2300
2301 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2302 {
2303         hci_req_add_le_scan_disable(req);
2304         return 0;
2305 }
2306
2307 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2308 {
2309         u8 length = opt;
2310         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2311         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2312         struct hci_cp_inquiry cp;
2313
2314         BT_DBG("%s", req->hdev->name);
2315
2316         hci_dev_lock(req->hdev);
2317         hci_inquiry_cache_flush(req->hdev);
2318         hci_dev_unlock(req->hdev);
2319
2320         memset(&cp, 0, sizeof(cp));
2321
2322         if (req->hdev->discovery.limited)
2323                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2324         else
2325                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2326
2327         cp.length = length;
2328
2329         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2330
2331         return 0;
2332 }
2333
2334 static void le_scan_disable_work(struct work_struct *work)
2335 {
2336         struct hci_dev *hdev = container_of(work, struct hci_dev,
2337                                             le_scan_disable.work);
2338         u8 status;
2339
2340         BT_DBG("%s", hdev->name);
2341
2342         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2343                 return;
2344
2345         cancel_delayed_work(&hdev->le_scan_restart);
2346
2347         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2348         if (status) {
2349                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2350                            status);
2351                 return;
2352         }
2353
2354         hdev->discovery.scan_start = 0;
2355
2356         /* If we were running an LE-only scan, change the discovery
2357          * state. If we were running both LE and BR/EDR inquiry
2358          * simultaneously, and BR/EDR inquiry is already finished, stop
2359          * discovery, otherwise BR/EDR inquiry will stop discovery when
2360          * finished. If we are going to resolve a remote device name, do
2361          * not change the discovery state.
2362          */
2363
2364         if (hdev->discovery.type == DISCOV_TYPE_LE)
2365                 goto discov_stopped;
2366
2367         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2368                 return;
2369
2370         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2371                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2372                     hdev->discovery.state != DISCOVERY_RESOLVING)
2373                         goto discov_stopped;
2374
2375                 return;
2376         }
2377
2378         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2379                      HCI_CMD_TIMEOUT, &status);
2380         if (status) {
2381                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2382                 goto discov_stopped;
2383         }
2384
2385         return;
2386
2387 discov_stopped:
2388         hci_dev_lock(hdev);
2389         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2390         hci_dev_unlock(hdev);
2391 }
2392
2393 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2394 {
2395         struct hci_dev *hdev = req->hdev;
2396
2397         /* If the controller is not scanning, we are done. */
2398         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2399                 return 0;
2400
2401         hci_req_add_le_scan_disable(req);
2402
2403         if (use_ext_scan(hdev)) {
2404                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2405
2406                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2407                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2408                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2409
2410                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2411                             sizeof(ext_enable_cp), &ext_enable_cp);
2412         } else {
2413                 struct hci_cp_le_set_scan_enable cp;
2414
2415                 memset(&cp, 0, sizeof(cp));
2416                 cp.enable = LE_SCAN_ENABLE;
2417                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2418                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2419         }
2420
2421         return 0;
2422 }
2423
2424 static void le_scan_restart_work(struct work_struct *work)
2425 {
2426         struct hci_dev *hdev = container_of(work, struct hci_dev,
2427                                             le_scan_restart.work);
2428         unsigned long timeout, duration, scan_start, now;
2429         u8 status;
2430
2431         BT_DBG("%s", hdev->name);
2432
2433         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2434         if (status) {
2435                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2436                            status);
2437                 return;
2438         }
2439
2440         hci_dev_lock(hdev);
2441
2442         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2443             !hdev->discovery.scan_start)
2444                 goto unlock;
2445
2446         /* When the scan was started, hdev->le_scan_disable was queued to
2447          * run once the scan duration elapsed. During scan restart that
2448          * work has been canceled, so queue it again with the remaining
2449          * timeout to make sure the scan does not run indefinitely.
2450          */
2451         duration = hdev->discovery.scan_duration;
2452         scan_start = hdev->discovery.scan_start;
2453         now = jiffies;
2454         if (now - scan_start <= duration) {
2455                 int elapsed;
2456
2457                 if (now >= scan_start)
2458                         elapsed = now - scan_start;
2459                 else
2460                         elapsed = ULONG_MAX - scan_start + now;
2461
2462                 timeout = duration - elapsed;
2463         } else {
2464                 timeout = 0;
2465         }
2466
2467         queue_delayed_work(hdev->req_workqueue,
2468                            &hdev->le_scan_disable, timeout);
2469
2470 unlock:
2471         hci_dev_unlock(hdev);
2472 }
2473
2474 static int active_scan(struct hci_request *req, unsigned long opt)
2475 {
2476         uint16_t interval = opt;
2477         struct hci_dev *hdev = req->hdev;
2478         u8 own_addr_type;
2479         int err;
2480
2481         BT_DBG("%s", hdev->name);
2482
2483         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2484                 hci_dev_lock(hdev);
2485
2486                 /* Don't let discovery abort an outgoing connection attempt
2487                  * that's using directed advertising.
2488                  */
2489                 if (hci_lookup_le_connect(hdev)) {
2490                         hci_dev_unlock(hdev);
2491                         return -EBUSY;
2492                 }
2493
2494                 cancel_adv_timeout(hdev);
2495                 hci_dev_unlock(hdev);
2496
2497                 __hci_req_disable_advertising(req);
2498         }
2499
2500         /* If the controller is scanning, the background scan is running.
2501          * Temporarily stop it in order to set the discovery scanning
2502          * parameters.
2503          */
2504         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2505                 hci_req_add_le_scan_disable(req);
2506
2507         /* All active scans will be done with either a resolvable private
2508          * address (when privacy feature has been enabled) or non-resolvable
2509          * private address.
2510          */
2511         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2512                                         &own_addr_type);
2513         if (err < 0)
2514                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2515
2516         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2517                            own_addr_type, 0);
2518         return 0;
2519 }
2520
2521 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2522 {
2523         int err;
2524
2525         BT_DBG("%s", req->hdev->name);
2526
2527         err = active_scan(req, opt);
2528         if (err)
2529                 return err;
2530
2531         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2532 }
2533
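/* Start inquiry and/or LE scanning according to the discovery type
 * and, for LE-based discovery, arm the le_scan_disable work with the
 * applicable timeout.
 */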
2534 static void start_discovery(struct hci_dev *hdev, u8 *status)
2535 {
2536         unsigned long timeout;
2537
2538         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2539
2540         switch (hdev->discovery.type) {
2541         case DISCOV_TYPE_BREDR:
2542                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2543                         hci_req_sync(hdev, bredr_inquiry,
2544                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2545                                      status);
2546                 return;
2547         case DISCOV_TYPE_INTERLEAVED:
2548                 /* When running simultaneous discovery, the LE scanning time
2549                  * should occupy the whole discovery time since BR/EDR inquiry
2550                  * and LE scanning are scheduled by the controller.
2551                  *
2552                  * Interleaved discovery, in comparison, runs BR/EDR inquiry
2553                  * and LE scanning sequentially, each with its own separate
2554                  * timeout.
2555                  */
2556                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2557                              &hdev->quirks)) {
2558                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2559                         /* During simultaneous discovery, we double LE scan
2560                          * interval. We must leave some time for the controller
2561                          * to do BR/EDR inquiry.
2562                          */
2563                         hci_req_sync(hdev, interleaved_discov,
2564                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2565                                      status);
2566                         break;
2567                 }
2568
2569                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2570                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2571                              HCI_CMD_TIMEOUT, status);
2572                 break;
2573         case DISCOV_TYPE_LE:
2574                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2575                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2576                              HCI_CMD_TIMEOUT, status);
2577                 break;
2578         default:
2579                 *status = HCI_ERROR_UNSPECIFIED;
2580                 return;
2581         }
2582
2583         if (*status)
2584                 return;
2585
2586         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2587
2588         /* When service discovery is used and the controller has a
2589          * strict duplicate filter, it is important to remember the
2590          * start and duration of the scan. This is required for
2591          * restarting scanning during the discovery phase.
2592          */
2593         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2594                      hdev->discovery.result_filtering) {
2595                 hdev->discovery.scan_start = jiffies;
2596                 hdev->discovery.scan_duration = timeout;
2597         }
2598
2599         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2600                            timeout);
2601 }
2602
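/* Queue the commands needed to stop the current discovery session,
 * including any pending remote name request. Returns true if there
 * was anything to stop.
 */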
2603 bool hci_req_stop_discovery(struct hci_request *req)
2604 {
2605         struct hci_dev *hdev = req->hdev;
2606         struct discovery_state *d = &hdev->discovery;
2607         struct hci_cp_remote_name_req_cancel cp;
2608         struct inquiry_entry *e;
2609         bool ret = false;
2610
2611         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2612
2613         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2614                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2615                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2616
2617                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2618                         cancel_delayed_work(&hdev->le_scan_disable);
2619                         hci_req_add_le_scan_disable(req);
2620                 }
2621
2622                 ret = true;
2623         } else {
2624                 /* Passive scanning */
2625                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2626                         hci_req_add_le_scan_disable(req);
2627                         ret = true;
2628                 }
2629         }
2630
2631         /* No further actions needed for LE-only discovery */
2632         if (d->type == DISCOV_TYPE_LE)
2633                 return ret;
2634
2635         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2636                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2637                                                      NAME_PENDING);
2638                 if (!e)
2639                         return ret;
2640
2641                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2642                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2643                             &cp);
2644                 ret = true;
2645         }
2646
2647         return ret;
2648 }
2649
2650 static int stop_discovery(struct hci_request *req, unsigned long opt)
2651 {
2652         hci_dev_lock(req->hdev);
2653         hci_req_stop_discovery(req);
2654         hci_dev_unlock(req->hdev);
2655
2656         return 0;
2657 }
2658
2659 static void discov_update(struct work_struct *work)
2660 {
2661         struct hci_dev *hdev = container_of(work, struct hci_dev,
2662                                             discov_update);
2663         u8 status = 0;
2664
2665         switch (hdev->discovery.state) {
2666         case DISCOVERY_STARTING:
2667                 start_discovery(hdev, &status);
2668                 mgmt_start_discovery_complete(hdev, status);
2669                 if (status)
2670                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2671                 else
2672                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2673                 break;
2674         case DISCOVERY_STOPPING:
2675                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2676                 mgmt_stop_discovery_complete(hdev, status);
2677                 if (!status)
2678                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2679                 break;
2680         case DISCOVERY_STOPPED:
2681         default:
2682                 return;
2683         }
2684 }
2685
2686 static void discov_off(struct work_struct *work)
2687 {
2688         struct hci_dev *hdev = container_of(work, struct hci_dev,
2689                                             discov_off.work);
2690
2691         BT_DBG("%s", hdev->name);
2692
2693         hci_dev_lock(hdev);
2694
2695         /* When the discoverable timeout triggers, just make sure
2696          * the limited discoverable flag is cleared. Even in the case
2697          * of a timeout triggered from general discoverable, it is
2698          * safe to unconditionally clear the flag.
2699          */
2700         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2701         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2702         hdev->discov_timeout = 0;
2703
2704         hci_dev_unlock(hdev);
2705
2706         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2707         mgmt_new_settings(hdev);
2708 }
2709
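/* Bring the controller in sync with the stack settings after powering
 * on: SSP and Secure Connections, LE host support, advertising, link
 * security and the BR/EDR scan, class, name and EIR settings.
 */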
2710 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2711 {
2712         struct hci_dev *hdev = req->hdev;
2713         u8 link_sec;
2714
2715         hci_dev_lock(hdev);
2716
2717         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2718             !lmp_host_ssp_capable(hdev)) {
2719                 u8 mode = 0x01;
2720
2721                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2722
2723                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2724                         u8 support = 0x01;
2725
2726                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2727                                     sizeof(support), &support);
2728                 }
2729         }
2730
2731         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2732             lmp_bredr_capable(hdev)) {
2733                 struct hci_cp_write_le_host_supported cp;
2734
2735                 cp.le = 0x01;
2736                 cp.simul = 0x00;
2737
2738                 /* Check first if we already have the right
2739                  * host state (host features set)
2740                  */
2741                 if (cp.le != lmp_host_le_capable(hdev) ||
2742                     cp.simul != lmp_host_le_br_capable(hdev))
2743                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2744                                     sizeof(cp), &cp);
2745         }
2746
2747         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2748                 /* Make sure the controller has a good default for
2749                  * advertising data. This also applies to the case
2750                  * where BR/EDR was toggled during the AUTO_OFF phase.
2751                  */
2752                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2753                     list_empty(&hdev->adv_instances)) {
2754                         int err;
2755
2756                         if (ext_adv_capable(hdev)) {
2757                                 err = __hci_req_setup_ext_adv_instance(req,
2758                                                                        0x00);
2759                                 if (!err)
2760                                         __hci_req_update_scan_rsp_data(req,
2761                                                                        0x00);
2762                         } else {
2763                                 err = 0;
2764                                 __hci_req_update_adv_data(req, 0x00);
2765                                 __hci_req_update_scan_rsp_data(req, 0x00);
2766                         }
2767
2768                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2769                                 if (!ext_adv_capable(hdev))
2770                                         __hci_req_enable_advertising(req);
2771                                 else if (!err)
2772                                         __hci_req_enable_ext_advertising(req,
2773                                                                          0x00);
2774                         }
2775                 } else if (!list_empty(&hdev->adv_instances)) {
2776                         struct adv_info *adv_instance;
2777
2778                         adv_instance = list_first_entry(&hdev->adv_instances,
2779                                                         struct adv_info, list);
2780                         __hci_req_schedule_adv_instance(req,
2781                                                         adv_instance->instance,
2782                                                         true);
2783                 }
2784         }
2785
2786         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2787         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2788                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2789                             sizeof(link_sec), &link_sec);
2790
2791         if (lmp_bredr_capable(hdev)) {
2792                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2793                         __hci_req_write_fast_connectable(req, true);
2794                 else
2795                         __hci_req_write_fast_connectable(req, false);
2796                 __hci_req_update_scan(req);
2797                 __hci_req_update_class(req);
2798                 __hci_req_update_name(req);
2799                 __hci_req_update_eir(req);
2800         }
2801
2802         hci_dev_unlock(hdev);
2803         return 0;
2804 }
2805
2806 int __hci_req_hci_power_on(struct hci_dev *hdev)
2807 {
2808         /* Register the available SMP channels (BR/EDR and LE) only when
2809          * successfully powering on the controller. This late
2810          * registration is required so that LE SMP can clearly decide
2811          * whether the public address or the static address is used.
2812          */
2813         smp_register(hdev);
2814
2815         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2816                               NULL);
2817 }
2818
2819 void hci_request_setup(struct hci_dev *hdev)
2820 {
2821         INIT_WORK(&hdev->discov_update, discov_update);
2822         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2823         INIT_WORK(&hdev->scan_update, scan_update_work);
2824         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2825         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2826         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2827         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2828         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2829         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2830 }
2831
2832 void hci_request_cancel_all(struct hci_dev *hdev)
2833 {
2834         hci_req_sync_cancel(hdev, ENODEV);
2835
2836         cancel_work_sync(&hdev->discov_update);
2837         cancel_work_sync(&hdev->bg_scan_update);
2838         cancel_work_sync(&hdev->scan_update);
2839         cancel_work_sync(&hdev->connectable_update);
2840         cancel_work_sync(&hdev->discoverable_update);
2841         cancel_delayed_work_sync(&hdev->discov_off);
2842         cancel_delayed_work_sync(&hdev->le_scan_disable);
2843         cancel_delayed_work_sync(&hdev->le_scan_restart);
2844
2845         if (hdev->adv_instance_timeout) {
2846                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2847                 hdev->adv_instance_timeout = 0;
2848         }
2849 }