/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
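
/* Illustrative use of the dut_mode attribute defined above (a sketch,
 * assuming the controller is registered as hci0 and debugfs is mounted
 * at /sys/kernel/debug; strtobool() also accepts 1/0 and y/n):
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * Writing toggles Device Under Test mode by sending either
 * HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET; reading reports the flag.
 */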

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, then there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

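/* Send a single HCI command and sleep until hci_req_sync_complete()
 * fires or the timeout expires. On success the skb carrying the
 * response parameters (Command Complete, or the event requested via
 * @event) is returned; the caller owns it and must kfree_skb() it.
 */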
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
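
/* Typical caller-side pattern for the synchronous helpers above (a
 * sketch; the 0xfc01 vendor opcode and param payload are hypothetical):
 *
 *	u8 param[2] = { 0x00, 0x01 };
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */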

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

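/* The *_req() functions below are request builders: each queues one or
 * more commands on a struct hci_request via hci_req_add() and is passed
 * as the callback to __hci_req_sync(), which then runs the whole batch
 * as a single serialized request.
 */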
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

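/* Build the page 1 event mask. In the eight mask octets, bit b of
 * octet n enables the HCI event with code (n * 8 + b + 1); for example
 * events[0] |= 0x10 enables Disconnection Complete (0x05).
 */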
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * max_page to a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

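/* Run the staged init sequence: stage 1 resets the controller and reads
 * basic identity information, stage 2 performs transport-specific setup,
 * and stages 3 and 4, which run only for HCI_BREDR type (i.e. non-AMP)
 * controllers, program the event masks and optional features.
 */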
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It will then only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
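/* A minimal usage sketch; every successful hci_dev_get() must be
 * balanced by hci_dev_put():
 *
 *	hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */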
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

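/* Insert or refresh the inquiry cache entry for the device in @data.
 * The returned MGMT_DEV_FOUND_* flags (legacy pairing, confirm name)
 * are meant to accompany the resulting device-found report.
 */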
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

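/* Userspace reaches this through the HCIINQUIRY ioctl on an HCI socket.
 * A rough sketch (the 8-response sizing is arbitrary; 0x33 0x8b 0x9e is
 * the General Inquiry Access Code LAP in little-endian byte order):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 8 } };
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * On return, buf.ir.num_rsp holds the number of inquiry_info entries
 * copied back, matching the copy_to_user() layout below.
 */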
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

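/* Bring the controller up under req_lock: open the transport, run the
 * driver setup/config stages if needed, then either the full
 * __hci_init() sequence or __hci_unconf_init() for unconfigured
 * controllers. On any failure the transport is closed again and all
 * pending queues are purged.
 */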
1401 static int hci_dev_do_open(struct hci_dev *hdev)
1402 {
1403         int ret = 0;
1404
1405         BT_DBG("%s %p", hdev->name, hdev);
1406
1407         hci_req_lock(hdev);
1408
1409         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1410                 ret = -ENODEV;
1411                 goto done;
1412         }
1413
1414         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1415             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1416                 /* Check for rfkill but allow the HCI setup stage to
1417                  * proceed (which in itself doesn't cause any RF activity).
1418                  */
1419                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1420                         ret = -ERFKILL;
1421                         goto done;
1422                 }
1423
1424                 /* Check for valid public address or a configured static
1425                  * random adddress, but let the HCI setup proceed to
1426                  * be able to determine if there is a public address
1427                  * or not.
1428                  *
1429                  * In case of user channel usage, it is not important
1430                  * if a public address or static random address is
1431                  * available.
1432                  *
1433                  * This check is only valid for BR/EDR controllers
1434                  * since AMP controllers do not have an address.
1435                  */
1436                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1437                     hdev->dev_type == HCI_BREDR &&
1438                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1439                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1440                         ret = -EADDRNOTAVAIL;
1441                         goto done;
1442                 }
1443         }
1444
1445         if (test_bit(HCI_UP, &hdev->flags)) {
1446                 ret = -EALREADY;
1447                 goto done;
1448         }
1449
1450         if (hdev->open(hdev)) {
1451                 ret = -EIO;
1452                 goto done;
1453         }
1454
1455         set_bit(HCI_RUNNING, &hdev->flags);
1456         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1457
1458         atomic_set(&hdev->cmd_cnt, 1);
1459         set_bit(HCI_INIT, &hdev->flags);
1460
1461         if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1462                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1463
1464                 if (hdev->setup)
1465                         ret = hdev->setup(hdev);
1466
1467                 /* The transport driver can set these quirks before
1468                  * creating the HCI device or in its setup callback.
1469                  *
1470                  * In case any of them is set, the controller has to
1471                  * start up as unconfigured.
1472                  */
1473                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1474                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1475                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1476
1477                 /* For an unconfigured controller it is required to
1478                  * read at least the version information provided by
1479                  * the Read Local Version Information command.
1480                  *
1481                  * If the set_bdaddr driver callback is provided, then
1482                  * also the original Bluetooth public device address
1483                  * will be read using the Read BD Address command.
1484                  */
1485                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1486                         ret = __hci_unconf_init(hdev);
1487         }
1488
1489         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1490                 /* If public address change is configured, ensure that
1491                  * the address gets programmed. If the driver does not
1492                  * support changing the public address, fail the power
1493                  * on procedure.
1494                  */
1495                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1496                     hdev->set_bdaddr)
1497                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1498                 else
1499                         ret = -EADDRNOTAVAIL;
1500         }
1501
1502         if (!ret) {
1503                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1504                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1505                         ret = __hci_init(hdev);
1506                         if (!ret && hdev->post_init)
1507                                 ret = hdev->post_init(hdev);
1508                 }
1509         }
1510
1511         /* If the HCI Reset command is clearing all diagnostic settings,
1512          * then they need to be reprogrammed after the init procedure
1513          * completed.
1514          */
1515         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1516             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1517                 ret = hdev->set_diag(hdev, true);
1518
1519         clear_bit(HCI_INIT, &hdev->flags);
1520
1521         if (!ret) {
1522                 hci_dev_hold(hdev);
1523                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1524                 set_bit(HCI_UP, &hdev->flags);
1525                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1526                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1527                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1528                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1529                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1530                     hdev->dev_type == HCI_BREDR) {
1531                         hci_dev_lock(hdev);
1532                         mgmt_powered(hdev, 1);
1533                         hci_dev_unlock(hdev);
1534                 }
1535         } else {
1536                 /* Init failed, cleanup */
1537                 flush_work(&hdev->tx_work);
1538                 flush_work(&hdev->cmd_work);
1539                 flush_work(&hdev->rx_work);
1540
1541                 skb_queue_purge(&hdev->cmd_q);
1542                 skb_queue_purge(&hdev->rx_q);
1543
1544                 if (hdev->flush)
1545                         hdev->flush(hdev);
1546
1547                 if (hdev->sent_cmd) {
1548                         kfree_skb(hdev->sent_cmd);
1549                         hdev->sent_cmd = NULL;
1550                 }
1551
1552                 clear_bit(HCI_RUNNING, &hdev->flags);
1553                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1554
1555                 hdev->close(hdev);
1556                 hdev->flags &= BIT(HCI_RAW);
1557         }
1558
1559 done:
1560         hci_req_unlock(hdev);
1561         return ret;
1562 }
1563
1564 /* ---- HCI ioctl helpers ---- */
1565
1566 int hci_dev_open(__u16 dev)
1567 {
1568         struct hci_dev *hdev;
1569         int err;
1570
1571         hdev = hci_dev_get(dev);
1572         if (!hdev)
1573                 return -ENODEV;
1574
1575         /* Devices that are marked as unconfigured can only be powered
1576          * up as user channel. Trying to bring them up as normal devices
1577          * will result in a failure. Only user channel operation is
1578          * possible.
1579          *
1580          * When this function is called for a user channel, the flag
1581          * HCI_USER_CHANNEL will be set first before attempting to
1582          * open the device.
1583          */
1584         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1585             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1586                 err = -EOPNOTSUPP;
1587                 goto done;
1588         }
1589
1590         /* We need to ensure that no other power on/off work is pending
1591          * before proceeding to call hci_dev_do_open. This is
1592          * particularly important if the setup procedure has not yet
1593          * completed.
1594          */
1595         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1596                 cancel_delayed_work(&hdev->power_off);
1597
1598         /* After this call it is guaranteed that the setup procedure
1599          * has finished. This means that error conditions like RFKILL
1600          * or no valid public or static random address apply.
1601          */
1602         flush_workqueue(hdev->req_workqueue);
1603
1604         /* For controllers that do not use the management interface and
1605          * are brought up via the legacy ioctl, set the HCI_BONDABLE bit
1606          * so that pairing works for them. Once the management interface
1607          * is in use this bit will be cleared again and userspace has
1608          * to explicitly enable it.
1609          */
1610         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1611             !hci_dev_test_flag(hdev, HCI_MGMT))
1612                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1613
1614         err = hci_dev_do_open(hdev);
1615
1616 done:
1617         hci_dev_put(hdev);
1618         return err;
1619 }
1620
1621 /* This function requires the caller holds hdev->lock */
1622 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1623 {
1624         struct hci_conn_params *p;
1625
1626         list_for_each_entry(p, &hdev->le_conn_params, list) {
1627                 if (p->conn) {
1628                         hci_conn_drop(p->conn);
1629                         hci_conn_put(p->conn);
1630                         p->conn = NULL;
1631                 }
1632                 list_del_init(&p->action);
1633         }
1634
1635         BT_DBG("All LE pending actions cleared");
1636 }
1637
1638 int hci_dev_do_close(struct hci_dev *hdev)
1639 {
1640         bool auto_off;
1641
1642         BT_DBG("%s %p", hdev->name, hdev);
1643
1644         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1645             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1646             test_bit(HCI_UP, &hdev->flags)) {
1647                 /* Execute vendor specific shutdown routine */
1648                 if (hdev->shutdown)
1649                         hdev->shutdown(hdev);
1650         }
1651
1652         cancel_delayed_work(&hdev->power_off);
1653
1654         hci_req_cancel(hdev, ENODEV);
1655         hci_req_lock(hdev);
1656
1657         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1658                 cancel_delayed_work_sync(&hdev->cmd_timer);
1659                 hci_req_unlock(hdev);
1660                 return 0;
1661         }
1662
1663         /* Flush the RX and TX work items */
1664         flush_work(&hdev->tx_work);
1665         flush_work(&hdev->rx_work);
1666
1667         if (hdev->discov_timeout > 0) {
1668                 cancel_delayed_work(&hdev->discov_off);
1669                 hdev->discov_timeout = 0;
1670                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1671                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1672         }
1673
1674         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1675                 cancel_delayed_work(&hdev->service_cache);
1676
1677         cancel_delayed_work_sync(&hdev->le_scan_disable);
1678         cancel_delayed_work_sync(&hdev->le_scan_restart);
1679
1680         if (hci_dev_test_flag(hdev, HCI_MGMT))
1681                 cancel_delayed_work_sync(&hdev->rpa_expired);
1682
1683         if (hdev->adv_instance_timeout) {
1684                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1685                 hdev->adv_instance_timeout = 0;
1686         }
1687
1688         /* Avoid potential lockdep warnings from the *_flush() calls by
1689          * ensuring the workqueue is empty up front.
1690          */
1691         drain_workqueue(hdev->workqueue);
1692
1693         hci_dev_lock(hdev);
1694
1695         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1696
1697         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1698
1699         if (!auto_off && hdev->dev_type == HCI_BREDR)
1700                 mgmt_powered(hdev, 0);
1701
1702         hci_inquiry_cache_flush(hdev);
1703         hci_pend_le_actions_clear(hdev);
1704         hci_conn_hash_flush(hdev);
1705         hci_dev_unlock(hdev);
1706
1707         smp_unregister(hdev);
1708
1709         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1710
1711         if (hdev->flush)
1712                 hdev->flush(hdev);
1713
1714         /* Reset device */
1715         skb_queue_purge(&hdev->cmd_q);
1716         atomic_set(&hdev->cmd_cnt, 1);
1717         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1718             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1719                 set_bit(HCI_INIT, &hdev->flags);
1720                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1721                 clear_bit(HCI_INIT, &hdev->flags);
1722         }
1723
1724         /* Flush the command work */
1725         flush_work(&hdev->cmd_work);
1726
1727         /* Drop queues */
1728         skb_queue_purge(&hdev->rx_q);
1729         skb_queue_purge(&hdev->cmd_q);
1730         skb_queue_purge(&hdev->raw_q);
1731
1732         /* Drop last sent command */
1733         if (hdev->sent_cmd) {
1734                 cancel_delayed_work_sync(&hdev->cmd_timer);
1735                 kfree_skb(hdev->sent_cmd);
1736                 hdev->sent_cmd = NULL;
1737         }
1738
1739         clear_bit(HCI_RUNNING, &hdev->flags);
1740         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1741
1742         /* After this point our queues are empty
1743          * and no tasks are scheduled. */
1744         hdev->close(hdev);
1745
1746         /* Clear flags */
1747         hdev->flags &= BIT(HCI_RAW);
1748         hci_dev_clear_volatile_flags(hdev);
1749
1750         /* Controller radio is available but is currently powered down */
1751         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1752
1753         memset(hdev->eir, 0, sizeof(hdev->eir));
1754         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1755         bacpy(&hdev->random_addr, BDADDR_ANY);
1756
1757         hci_req_unlock(hdev);
1758
1759         hci_dev_put(hdev);
1760         return 0;
1761 }
1762
1763 int hci_dev_close(__u16 dev)
1764 {
1765         struct hci_dev *hdev;
1766         int err;
1767
1768         hdev = hci_dev_get(dev);
1769         if (!hdev)
1770                 return -ENODEV;
1771
1772         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1773                 err = -EBUSY;
1774                 goto done;
1775         }
1776
1777         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1778                 cancel_delayed_work(&hdev->power_off);
1779
1780         err = hci_dev_do_close(hdev);
1781
1782 done:
1783         hci_dev_put(hdev);
1784         return err;
1785 }
1786
1787 static int hci_dev_do_reset(struct hci_dev *hdev)
1788 {
1789         int ret;
1790
1791         BT_DBG("%s %p", hdev->name, hdev);
1792
1793         hci_req_lock(hdev);
1794
1795         /* Drop queues */
1796         skb_queue_purge(&hdev->rx_q);
1797         skb_queue_purge(&hdev->cmd_q);
1798
1799         /* Avoid potential lockdep warnings from the *_flush() calls by
1800          * ensuring the workqueue is empty up front.
1801          */
1802         drain_workqueue(hdev->workqueue);
1803
1804         hci_dev_lock(hdev);
1805         hci_inquiry_cache_flush(hdev);
1806         hci_conn_hash_flush(hdev);
1807         hci_dev_unlock(hdev);
1808
1809         if (hdev->flush)
1810                 hdev->flush(hdev);
1811
1812         atomic_set(&hdev->cmd_cnt, 1);
1813         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1814
1815         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1816
1817         hci_req_unlock(hdev);
1818         return ret;
1819 }
1820
1821 int hci_dev_reset(__u16 dev)
1822 {
1823         struct hci_dev *hdev;
1824         int err;
1825
1826         hdev = hci_dev_get(dev);
1827         if (!hdev)
1828                 return -ENODEV;
1829
1830         if (!test_bit(HCI_UP, &hdev->flags)) {
1831                 err = -ENETDOWN;
1832                 goto done;
1833         }
1834
1835         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1836                 err = -EBUSY;
1837                 goto done;
1838         }
1839
1840         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1841                 err = -EOPNOTSUPP;
1842                 goto done;
1843         }
1844
1845         err = hci_dev_do_reset(hdev);
1846
1847 done:
1848         hci_dev_put(hdev);
1849         return err;
1850 }
1851
1852 int hci_dev_reset_stat(__u16 dev)
1853 {
1854         struct hci_dev *hdev;
1855         int ret = 0;
1856
1857         hdev = hci_dev_get(dev);
1858         if (!hdev)
1859                 return -ENODEV;
1860
1861         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1862                 ret = -EBUSY;
1863                 goto done;
1864         }
1865
1866         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1867                 ret = -EOPNOTSUPP;
1868                 goto done;
1869         }
1870
1871         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1872
1873 done:
1874         hci_dev_put(hdev);
1875         return ret;
1876 }
1877
1878 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1879 {
1880         bool conn_changed, discov_changed;
1881
1882         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1883
1884         if ((scan & SCAN_PAGE))
1885                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1886                                                           HCI_CONNECTABLE);
1887         else
1888                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1889                                                            HCI_CONNECTABLE);
1890
1891         if ((scan & SCAN_INQUIRY)) {
1892                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1893                                                             HCI_DISCOVERABLE);
1894         } else {
1895                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1896                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1897                                                              HCI_DISCOVERABLE);
1898         }
1899
1900         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1901                 return;
1902
1903         if (conn_changed || discov_changed) {
1904                 /* In case this was disabled through mgmt */
1905                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1906
1907                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1908                         mgmt_update_adv_data(hdev);
1909
1910                 mgmt_new_settings(hdev);
1911         }
1912 }
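     /* A minimal userspace sketch of what drives the path above, assuming
      * the BlueZ <bluetooth/hci.h> ioctl definitions (HCISETSCAN and the
      * SCAN_* constants; "ctl" is an assumed, already-open HCI control
      * socket from socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)). Writing
      * SCAN_PAGE | SCAN_INQUIRY matches the classic "hciconfig hci0 piscan"
      * connectable + discoverable setting:
      *
      *    struct hci_dev_req dr = {
      *            .dev_id  = 0,                            // hci0
      *            .dev_opt = SCAN_PAGE | SCAN_INQUIRY,     // 0x02 | 0x01
      *    };
      *    if (ioctl(ctl, HCISETSCAN, &dr) < 0)
      *            perror("HCISETSCAN");
      */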
1913
1914 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1915 {
1916         struct hci_dev *hdev;
1917         struct hci_dev_req dr;
1918         int err = 0;
1919
1920         if (copy_from_user(&dr, arg, sizeof(dr)))
1921                 return -EFAULT;
1922
1923         hdev = hci_dev_get(dr.dev_id);
1924         if (!hdev)
1925                 return -ENODEV;
1926
1927         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1928                 err = -EBUSY;
1929                 goto done;
1930         }
1931
1932         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1933                 err = -EOPNOTSUPP;
1934                 goto done;
1935         }
1936
1937         if (hdev->dev_type != HCI_BREDR) {
1938                 err = -EOPNOTSUPP;
1939                 goto done;
1940         }
1941
1942         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1943                 err = -EOPNOTSUPP;
1944                 goto done;
1945         }
1946
1947         switch (cmd) {
1948         case HCISETAUTH:
1949                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1950                                    HCI_INIT_TIMEOUT);
1951                 break;
1952
1953         case HCISETENCRYPT:
1954                 if (!lmp_encrypt_capable(hdev)) {
1955                         err = -EOPNOTSUPP;
1956                         break;
1957                 }
1958
1959                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1960                         /* Auth must be enabled first */
1961                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1962                                            HCI_INIT_TIMEOUT);
1963                         if (err)
1964                                 break;
1965                 }
1966
1967                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1968                                    HCI_INIT_TIMEOUT);
1969                 break;
1970
1971         case HCISETSCAN:
1972                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1973                                    HCI_INIT_TIMEOUT);
1974
1975                 /* Ensure that the connectable and discoverable states
1976                  * get correctly modified as this was a non-mgmt change.
1977                  */
1978                 if (!err)
1979                         hci_update_scan_state(hdev, dr.dev_opt);
1980                 break;
1981
1982         case HCISETLINKPOL:
1983                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1984                                    HCI_INIT_TIMEOUT);
1985                 break;
1986
1987         case HCISETLINKMODE:
1988                 hdev->link_mode = ((__u16) dr.dev_opt) &
1989                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1990                 break;
1991
1992         case HCISETPTYPE:
1993                 hdev->pkt_type = (__u16) dr.dev_opt;
1994                 break;
1995
1996         case HCISETACLMTU:
1997                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1998                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1999                 break;
2000
2001         case HCISETSCOMTU:
2002                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2003                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2004                 break;
2005
2006         default:
2007                 err = -EINVAL;
2008                 break;
2009         }
2010
2011 done:
2012         hci_dev_put(hdev);
2013         return err;
2014 }
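     /* A hedged sketch of the HCISETACLMTU dev_opt packing consumed above,
      * assuming the BlueZ <bluetooth/hci.h> definitions. The two __u16
      * loads above read the halfwords of dev_opt in memory order, so on a
      * little-endian host the packet count sits in the low half and the
      * MTU in the high half (a big-endian host would need them swapped):
      *
      *    #include <stdint.h>
      *    #include <sys/ioctl.h>
      *    #include <bluetooth/bluetooth.h>
      *    #include <bluetooth/hci.h>
      *
      *    static int set_acl_mtu(int ctl, uint16_t dev_id,
      *                           uint16_t mtu, uint16_t pkts)
      *    {
      *            struct hci_dev_req dr = {
      *                    .dev_id  = dev_id,
      *                    .dev_opt = ((uint32_t)mtu << 16) | pkts,
      *            };
      *            return ioctl(ctl, HCISETACLMTU, &dr);
      *    }
      */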
2015
2016 int hci_get_dev_list(void __user *arg)
2017 {
2018         struct hci_dev *hdev;
2019         struct hci_dev_list_req *dl;
2020         struct hci_dev_req *dr;
2021         int n = 0, size, err;
2022         __u16 dev_num;
2023
2024         if (get_user(dev_num, (__u16 __user *) arg))
2025                 return -EFAULT;
2026
2027         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2028                 return -EINVAL;
2029
2030         size = sizeof(*dl) + dev_num * sizeof(*dr);
2031
2032         dl = kzalloc(size, GFP_KERNEL);
2033         if (!dl)
2034                 return -ENOMEM;
2035
2036         dr = dl->dev_req;
2037
2038         read_lock(&hci_dev_list_lock);
2039         list_for_each_entry(hdev, &hci_dev_list, list) {
2040                 unsigned long flags = hdev->flags;
2041
2042                 /* When auto-off is configured the transport is
2043                  * running, but the device must nevertheless be
2044                  * reported as down.
2045                  */
2046                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2047                         flags &= ~BIT(HCI_UP);
2048
2049                 (dr + n)->dev_id  = hdev->id;
2050                 (dr + n)->dev_opt = flags;
2051
2052                 if (++n >= dev_num)
2053                         break;
2054         }
2055         read_unlock(&hci_dev_list_lock);
2056
2057         dl->dev_num = n;
2058         size = sizeof(*dl) + n * sizeof(*dr);
2059
2060         err = copy_to_user(arg, dl, size);
2061         kfree(dl);
2062
2063         return err ? -EFAULT : 0;
2064 }
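     /* A minimal userspace sketch of driving the handler above, assuming
      * the BlueZ <bluetooth/hci.h> definitions (HCIGETDEVLIST, HCI_MAX_DEV,
      * struct hci_dev_list_req with its trailing dev_req[] array) plus
      * <stdio.h>/<stdlib.h>:
      *
      *    struct hci_dev_list_req *dl;
      *    struct hci_dev_req *dr;
      *    int i;
      *
      *    dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
      *    if (!dl)
      *            return;
      *    dl->dev_num = HCI_MAX_DEV;      // capacity in, actual count out
      *    dr = dl->dev_req;
      *
      *    if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
      *            for (i = 0; i < dl->dev_num; i++)
      *                    printf("hci%u flags 0x%x\n",
      *                           dr[i].dev_id, dr[i].dev_opt);
      *    free(dl);
      */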
2065
2066 int hci_get_dev_info(void __user *arg)
2067 {
2068         struct hci_dev *hdev;
2069         struct hci_dev_info di;
2070         unsigned long flags;
2071         int err = 0;
2072
2073         if (copy_from_user(&di, arg, sizeof(di)))
2074                 return -EFAULT;
2075
2076         hdev = hci_dev_get(di.dev_id);
2077         if (!hdev)
2078                 return -ENODEV;
2079
2080         /* When auto-off is configured the transport is
2081          * running, but the device must nevertheless be
2082          * reported as down.
2083          */
2084         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2085                 flags = hdev->flags & ~BIT(HCI_UP);
2086         else
2087                 flags = hdev->flags;
2088
2089         strcpy(di.name, hdev->name);
2090         di.bdaddr   = hdev->bdaddr;
2091         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2092         di.flags    = flags;
2093         di.pkt_type = hdev->pkt_type;
2094         if (lmp_bredr_capable(hdev)) {
2095                 di.acl_mtu  = hdev->acl_mtu;
2096                 di.acl_pkts = hdev->acl_pkts;
2097                 di.sco_mtu  = hdev->sco_mtu;
2098                 di.sco_pkts = hdev->sco_pkts;
2099         } else {
2100                 di.acl_mtu  = hdev->le_mtu;
2101                 di.acl_pkts = hdev->le_pkts;
2102                 di.sco_mtu  = 0;
2103                 di.sco_pkts = 0;
2104         }
2105         di.link_policy = hdev->link_policy;
2106         di.link_mode   = hdev->link_mode;
2107
2108         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2109         memcpy(&di.features, &hdev->features, sizeof(di.features));
2110
2111         if (copy_to_user(arg, &di, sizeof(di)))
2112                 err = -EFAULT;
2113
2114         hci_dev_put(hdev);
2115
2116         return err;
2117 }
2118
2119 /* ---- Interface to HCI drivers ---- */
2120
2121 static int hci_rfkill_set_block(void *data, bool blocked)
2122 {
2123         struct hci_dev *hdev = data;
2124
2125         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2126
2127         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2128                 return -EBUSY;
2129
2130         if (blocked) {
2131                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2132                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2133                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2134                         hci_dev_do_close(hdev);
2135         } else {
2136                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2137         }
2138
2139         return 0;
2140 }
2141
2142 static const struct rfkill_ops hci_rfkill_ops = {
2143         .set_block = hci_rfkill_set_block,
2144 };
2145
2146 static void hci_power_on(struct work_struct *work)
2147 {
2148         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2149         int err;
2150
2151         BT_DBG("%s", hdev->name);
2152
2153         err = hci_dev_do_open(hdev);
2154         if (err < 0) {
2155                 hci_dev_lock(hdev);
2156                 mgmt_set_powered_failed(hdev, err);
2157                 hci_dev_unlock(hdev);
2158                 return;
2159         }
2160
2161         /* During the HCI setup phase, a few error conditions are
2162          * ignored and they need to be checked now. If they are still
2163          * valid, it is important to turn the device back off.
2164          */
2165         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2166             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2167             (hdev->dev_type == HCI_BREDR &&
2168              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2169              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2170                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2171                 hci_dev_do_close(hdev);
2172         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2173                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2174                                    HCI_AUTO_OFF_TIMEOUT);
2175         }
2176
2177         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2178                 /* For unconfigured devices, set the HCI_RAW flag
2179                  * so that userspace can easily identify them.
2180                  */
2181                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2182                         set_bit(HCI_RAW, &hdev->flags);
2183
2184                 /* For fully configured devices, this will send
2185                  * the Index Added event. For unconfigured devices,
2186                  * it will send the Unconfigured Index Added event.
2187                  *
2188                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2189                  * and no event will be sent.
2190                  */
2191                 mgmt_index_added(hdev);
2192         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2193                 /* Once the controller is configured, it is
2194                  * important to clear the HCI_RAW flag.
2195                  */
2196                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2197                         clear_bit(HCI_RAW, &hdev->flags);
2198
2199                 /* Powering on the controller with HCI_CONFIG set only
2200                  * happens with the transition from unconfigured to
2201                  * configured. This will send the Index Added event.
2202                  */
2203                 mgmt_index_added(hdev);
2204         }
2205 }
2206
2207 static void hci_power_off(struct work_struct *work)
2208 {
2209         struct hci_dev *hdev = container_of(work, struct hci_dev,
2210                                             power_off.work);
2211
2212         BT_DBG("%s", hdev->name);
2213
2214         hci_dev_do_close(hdev);
2215 }
2216
2217 static void hci_error_reset(struct work_struct *work)
2218 {
2219         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2220
2221         BT_DBG("%s", hdev->name);
2222
2223         if (hdev->hw_error)
2224                 hdev->hw_error(hdev, hdev->hw_error_code);
2225         else
2226                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2227                        hdev->hw_error_code);
2228
2229         if (hci_dev_do_close(hdev))
2230                 return;
2231
2232         hci_dev_do_open(hdev);
2233 }
2234
2235 static void hci_discov_off(struct work_struct *work)
2236 {
2237         struct hci_dev *hdev;
2238
2239         hdev = container_of(work, struct hci_dev, discov_off.work);
2240
2241         BT_DBG("%s", hdev->name);
2242
2243         mgmt_discoverable_timeout(hdev);
2244 }
2245
2246 static void hci_adv_timeout_expire(struct work_struct *work)
2247 {
2248         struct hci_dev *hdev;
2249
2250         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2251
2252         BT_DBG("%s", hdev->name);
2253
2254         mgmt_adv_timeout_expired(hdev);
2255 }
2256
2257 void hci_uuids_clear(struct hci_dev *hdev)
2258 {
2259         struct bt_uuid *uuid, *tmp;
2260
2261         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2262                 list_del(&uuid->list);
2263                 kfree(uuid);
2264         }
2265 }
2266
2267 void hci_link_keys_clear(struct hci_dev *hdev)
2268 {
2269         struct link_key *key;
2270
2271         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2272                 list_del_rcu(&key->list);
2273                 kfree_rcu(key, rcu);
2274         }
2275 }
2276
2277 void hci_smp_ltks_clear(struct hci_dev *hdev)
2278 {
2279         struct smp_ltk *k;
2280
2281         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2282                 list_del_rcu(&k->list);
2283                 kfree_rcu(k, rcu);
2284         }
2285 }
2286
2287 void hci_smp_irks_clear(struct hci_dev *hdev)
2288 {
2289         struct smp_irk *k;
2290
2291         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2292                 list_del_rcu(&k->list);
2293                 kfree_rcu(k, rcu);
2294         }
2295 }
2296
2297 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2298 {
2299         struct link_key *k;
2300
2301         rcu_read_lock();
2302         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2303                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2304                         rcu_read_unlock();
2305                         return k;
2306                 }
2307         }
2308         rcu_read_unlock();
2309
2310         return NULL;
2311 }
2312
2313 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2314                                u8 key_type, u8 old_key_type)
2315 {
2316         /* Legacy key */
2317         if (key_type < 0x03)
2318                 return true;
2319
2320         /* Debug keys are insecure so don't store them persistently */
2321         if (key_type == HCI_LK_DEBUG_COMBINATION)
2322                 return false;
2323
2324         /* Changed combination key and there's no previous one */
2325         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2326                 return false;
2327
2328         /* Security mode 3 case */
2329         if (!conn)
2330                 return true;
2331
2332         /* BR/EDR key derived using SC from an LE link */
2333         if (conn->type == LE_LINK)
2334                 return true;
2335
2336         /* Neither the local nor the remote side set no-bonding as a requirement */
2337         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2338                 return true;
2339
2340         /* Local side had dedicated bonding as requirement */
2341         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2342                 return true;
2343
2344         /* Remote side had dedicated bonding as requirement */
2345         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2346                 return true;
2347
2348         /* If none of the above criteria match, then don't store the key
2349          * persistently */
2350         return false;
2351 }
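     /* For reference: the auth_type and remote_auth values compared above
      * are the authentication requirements from the IO capability exchange
      * (Bluetooth Core Specification): 0x00/0x01 mean no bonding (without/
      * with MITM protection), 0x02/0x03 dedicated bonding and 0x04/0x05
      * general bonding, which is why "> 0x01" reads as "some form of
      * bonding was requested".
      */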
2352
2353 static u8 ltk_role(u8 type)
2354 {
2355         if (type == SMP_LTK)
2356                 return HCI_ROLE_MASTER;
2357
2358         return HCI_ROLE_SLAVE;
2359 }
2360
2361 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2362                              u8 addr_type, u8 role)
2363 {
2364         struct smp_ltk *k;
2365
2366         rcu_read_lock();
2367         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2368                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2369                         continue;
2370
2371                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2372                         rcu_read_unlock();
2373                         return k;
2374                 }
2375         }
2376         rcu_read_unlock();
2377
2378         return NULL;
2379 }
2380
2381 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2382 {
2383         struct smp_irk *irk;
2384
2385         rcu_read_lock();
2386         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2387                 if (!bacmp(&irk->rpa, rpa)) {
2388                         rcu_read_unlock();
2389                         return irk;
2390                 }
2391         }
2392
2393         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2394                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2395                         bacpy(&irk->rpa, rpa);
2396                         rcu_read_unlock();
2397                         return irk;
2398                 }
2399         }
2400         rcu_read_unlock();
2401
2402         return NULL;
2403 }
2404
2405 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2406                                      u8 addr_type)
2407 {
2408         struct smp_irk *irk;
2409
2410         /* Identity Address must be public or static random */
2411         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2412                 return NULL;
2413
2414         rcu_read_lock();
2415         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2416                 if (addr_type == irk->addr_type &&
2417                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2418                         rcu_read_unlock();
2419                         return irk;
2420                 }
2421         }
2422         rcu_read_unlock();
2423
2424         return NULL;
2425 }
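     /* Note on the static random check above: bdaddr_t stores the address
      * little-endian, so b[5] holds the most significant byte. A static
      * random address must have its two most significant bits set to 1,
      * hence the (b[5] & 0xc0) == 0xc0 requirement.
      */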
2426
2427 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2428                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2429                                   u8 pin_len, bool *persistent)
2430 {
2431         struct link_key *key, *old_key;
2432         u8 old_key_type;
2433
2434         old_key = hci_find_link_key(hdev, bdaddr);
2435         if (old_key) {
2436                 old_key_type = old_key->type;
2437                 key = old_key;
2438         } else {
2439                 old_key_type = conn ? conn->key_type : 0xff;
2440                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2441                 if (!key)
2442                         return NULL;
2443                 list_add_rcu(&key->list, &hdev->link_keys);
2444         }
2445
2446         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2447
2448         /* Some buggy controller combinations generate a changed
2449          * combination key for legacy pairing even when there's no
2450          * previous key */
2451         if (type == HCI_LK_CHANGED_COMBINATION &&
2452             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2453                 type = HCI_LK_COMBINATION;
2454                 if (conn)
2455                         conn->key_type = type;
2456         }
2457
2458         bacpy(&key->bdaddr, bdaddr);
2459         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2460         key->pin_len = pin_len;
2461
2462         if (type == HCI_LK_CHANGED_COMBINATION)
2463                 key->type = old_key_type;
2464         else
2465                 key->type = type;
2466
2467         if (persistent)
2468                 *persistent = hci_persistent_key(hdev, conn, type,
2469                                                  old_key_type);
2470
2471         return key;
2472 }
2473
2474 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2475                             u8 addr_type, u8 type, u8 authenticated,
2476                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2477 {
2478         struct smp_ltk *key, *old_key;
2479         u8 role = ltk_role(type);
2480
2481         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2482         if (old_key)
2483                 key = old_key;
2484         else {
2485                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2486                 if (!key)
2487                         return NULL;
2488                 list_add_rcu(&key->list, &hdev->long_term_keys);
2489         }
2490
2491         bacpy(&key->bdaddr, bdaddr);
2492         key->bdaddr_type = addr_type;
2493         memcpy(key->val, tk, sizeof(key->val));
2494         key->authenticated = authenticated;
2495         key->ediv = ediv;
2496         key->rand = rand;
2497         key->enc_size = enc_size;
2498         key->type = type;
2499
2500         return key;
2501 }
2502
2503 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2505 {
2506         struct smp_irk *irk;
2507
2508         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2509         if (!irk) {
2510                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2511                 if (!irk)
2512                         return NULL;
2513
2514                 bacpy(&irk->bdaddr, bdaddr);
2515                 irk->addr_type = addr_type;
2516
2517                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2518         }
2519
2520         memcpy(irk->val, val, 16);
2521         bacpy(&irk->rpa, rpa);
2522
2523         return irk;
2524 }
2525
2526 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2527 {
2528         struct link_key *key;
2529
2530         key = hci_find_link_key(hdev, bdaddr);
2531         if (!key)
2532                 return -ENOENT;
2533
2534         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2535
2536         list_del_rcu(&key->list);
2537         kfree_rcu(key, rcu);
2538
2539         return 0;
2540 }
2541
2542 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2543 {
2544         struct smp_ltk *k;
2545         int removed = 0;
2546
2547         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2548                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2549                         continue;
2550
2551                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2552
2553                 list_del_rcu(&k->list);
2554                 kfree_rcu(k, rcu);
2555                 removed++;
2556         }
2557
2558         return removed ? 0 : -ENOENT;
2559 }
2560
2561 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2562 {
2563         struct smp_irk *k;
2564
2565         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2566                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2567                         continue;
2568
2569                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2570
2571                 list_del_rcu(&k->list);
2572                 kfree_rcu(k, rcu);
2573         }
2574 }
2575
2576 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2577 {
2578         struct smp_ltk *k;
2579         struct smp_irk *irk;
2580         u8 addr_type;
2581
2582         if (type == BDADDR_BREDR) {
2583                 if (hci_find_link_key(hdev, bdaddr))
2584                         return true;
2585                 return false;
2586         }
2587
2588         /* Convert to HCI addr type which struct smp_ltk uses */
2589         if (type == BDADDR_LE_PUBLIC)
2590                 addr_type = ADDR_LE_DEV_PUBLIC;
2591         else
2592                 addr_type = ADDR_LE_DEV_RANDOM;
2593
2594         irk = hci_get_irk(hdev, bdaddr, addr_type);
2595         if (irk) {
2596                 bdaddr = &irk->bdaddr;
2597                 addr_type = irk->addr_type;
2598         }
2599
2600         rcu_read_lock();
2601         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2602                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2603                         rcu_read_unlock();
2604                         return true;
2605                 }
2606         }
2607         rcu_read_unlock();
2608
2609         return false;
2610 }
2611
2612 /* HCI command timer function */
2613 static void hci_cmd_timeout(struct work_struct *work)
2614 {
2615         struct hci_dev *hdev = container_of(work, struct hci_dev,
2616                                             cmd_timer.work);
2617
2618         if (hdev->sent_cmd) {
2619                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2620                 u16 opcode = __le16_to_cpu(sent->opcode);
2621
2622                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2623         } else {
2624                 BT_ERR("%s command tx timeout", hdev->name);
2625         }
2626
2627         atomic_set(&hdev->cmd_cnt, 1);
2628         queue_work(hdev->workqueue, &hdev->cmd_work);
2629 }
2630
2631 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2632                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2633 {
2634         struct oob_data *data;
2635
2636         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2637                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2638                         continue;
2639                 if (data->bdaddr_type != bdaddr_type)
2640                         continue;
2641                 return data;
2642         }
2643
2644         return NULL;
2645 }
2646
2647 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2648                                u8 bdaddr_type)
2649 {
2650         struct oob_data *data;
2651
2652         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2653         if (!data)
2654                 return -ENOENT;
2655
2656         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2657
2658         list_del(&data->list);
2659         kfree(data);
2660
2661         return 0;
2662 }
2663
2664 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2665 {
2666         struct oob_data *data, *n;
2667
2668         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2669                 list_del(&data->list);
2670                 kfree(data);
2671         }
2672 }
2673
2674 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2675                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2676                             u8 *hash256, u8 *rand256)
2677 {
2678         struct oob_data *data;
2679
2680         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2681         if (!data) {
2682                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2683                 if (!data)
2684                         return -ENOMEM;
2685
2686                 bacpy(&data->bdaddr, bdaddr);
2687                 data->bdaddr_type = bdaddr_type;
2688                 list_add(&data->list, &hdev->remote_oob_data);
2689         }
2690
2691         if (hash192 && rand192) {
2692                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2693                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2694                 if (hash256 && rand256)
2695                         data->present = 0x03;
2696         } else {
2697                 memset(data->hash192, 0, sizeof(data->hash192));
2698                 memset(data->rand192, 0, sizeof(data->rand192));
2699                 if (hash256 && rand256)
2700                         data->present = 0x02;
2701                 else
2702                         data->present = 0x00;
2703         }
2704
2705         if (hash256 && rand256) {
2706                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2707                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2708         } else {
2709                 memset(data->hash256, 0, sizeof(data->hash256));
2710                 memset(data->rand256, 0, sizeof(data->rand256));
2711                 if (hash192 && rand192)
2712                         data->present = 0x01;
2713         }
2714
2715         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2716
2717         return 0;
2718 }
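     /* The data->present value maintained above is effectively a bitmask
      * of which OOB blocks are valid: bit 0 for the P-192 hash/randomizer
      * pair and bit 1 for the P-256 pair, so 0x03 means both were given.
      */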
2719
2720 /* This function requires the caller holds hdev->lock */
2721 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2722 {
2723         struct adv_info *adv_instance;
2724
2725         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2726                 if (adv_instance->instance == instance)
2727                         return adv_instance;
2728         }
2729
2730         return NULL;
2731 }
2732
2733 /* This function requires the caller holds hdev->lock */
2734 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
     {
2735         struct adv_info *cur_instance;
2736
2737         cur_instance = hci_find_adv_instance(hdev, instance);
2738         if (!cur_instance)
2739                 return NULL;
2740
2741         if (cur_instance == list_last_entry(&hdev->adv_instances,
2742                                             struct adv_info, list))
2743                 return list_first_entry(&hdev->adv_instances,
2744                                         struct adv_info, list);
2745         else
2746                 return list_next_entry(cur_instance, list);
2747 }
2748
2749 /* This function requires the caller holds hdev->lock */
2750 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2751 {
2752         struct adv_info *adv_instance;
2753
2754         adv_instance = hci_find_adv_instance(hdev, instance);
2755         if (!adv_instance)
2756                 return -ENOENT;
2757
2758         BT_DBG("%s removing instance %d", hdev->name, instance);
2759
2760         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2761                 cancel_delayed_work(&hdev->adv_instance_expire);
2762                 hdev->adv_instance_timeout = 0;
2763         }
2764
2765         list_del(&adv_instance->list);
2766         kfree(adv_instance);
2767
2768         hdev->adv_instance_cnt--;
2769
2770         return 0;
2771 }
2772
2773 /* This function requires the caller holds hdev->lock */
2774 void hci_adv_instances_clear(struct hci_dev *hdev)
2775 {
2776         struct adv_info *adv_instance, *n;
2777
2778         if (hdev->adv_instance_timeout) {
2779                 cancel_delayed_work(&hdev->adv_instance_expire);
2780                 hdev->adv_instance_timeout = 0;
2781         }
2782
2783         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2784                 list_del(&adv_instance->list);
2785                 kfree(adv_instance);
2786         }
2787
2788         hdev->adv_instance_cnt = 0;
2789 }
2790
2791 /* This function requires the caller holds hdev->lock */
2792 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2793                          u16 adv_data_len, u8 *adv_data,
2794                          u16 scan_rsp_len, u8 *scan_rsp_data,
2795                          u16 timeout, u16 duration)
2796 {
2797         struct adv_info *adv_instance;
2798
2799         adv_instance = hci_find_adv_instance(hdev, instance);
2800         if (adv_instance) {
2801                 memset(adv_instance->adv_data, 0,
2802                        sizeof(adv_instance->adv_data));
2803                 memset(adv_instance->scan_rsp_data, 0,
2804                        sizeof(adv_instance->scan_rsp_data));
2805         } else {
2806                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2807                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2808                         return -EOVERFLOW;
2809
2810                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2811                 if (!adv_instance)
2812                         return -ENOMEM;
2813
2814                 adv_instance->pending = true;
2815                 adv_instance->instance = instance;
2816                 list_add(&adv_instance->list, &hdev->adv_instances);
2817                 hdev->adv_instance_cnt++;
2818         }
2819
2820         adv_instance->flags = flags;
2821         adv_instance->adv_data_len = adv_data_len;
2822         adv_instance->scan_rsp_len = scan_rsp_len;
2823
2824         if (adv_data_len)
2825                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2826
2827         if (scan_rsp_len)
2828                 memcpy(adv_instance->scan_rsp_data,
2829                        scan_rsp_data, scan_rsp_len);
2830
2831         adv_instance->timeout = timeout;
2832         adv_instance->remaining_time = timeout;
2833
2834         if (duration == 0)
2835                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2836         else
2837                 adv_instance->duration = duration;
2838
2839         BT_DBG("%s for instance %d", hdev->name, instance);
2840
2841         return 0;
2842 }
2843
2844 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2845                                          bdaddr_t *bdaddr, u8 type)
2846 {
2847         struct bdaddr_list *b;
2848
2849         list_for_each_entry(b, bdaddr_list, list) {
2850                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2851                         return b;
2852         }
2853
2854         return NULL;
2855 }
2856
2857 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2858 {
2859         struct list_head *p, *n;
2860
2861         list_for_each_safe(p, n, bdaddr_list) {
2862                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2863
2864                 list_del(p);
2865                 kfree(b);
2866         }
2867 }
2868
2869 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2870 {
2871         struct bdaddr_list *entry;
2872
2873         if (!bacmp(bdaddr, BDADDR_ANY))
2874                 return -EBADF;
2875
2876         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2877                 return -EEXIST;
2878
2879         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2880         if (!entry)
2881                 return -ENOMEM;
2882
2883         bacpy(&entry->bdaddr, bdaddr);
2884         entry->bdaddr_type = type;
2885
2886         list_add(&entry->list, list);
2887
2888         return 0;
2889 }
2890
2891 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2892 {
2893         struct bdaddr_list *entry;
2894
2895         if (!bacmp(bdaddr, BDADDR_ANY)) {
2896                 hci_bdaddr_list_clear(list);
2897                 return 0;
2898         }
2899
2900         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2901         if (!entry)
2902                 return -ENOENT;
2903
2904         list_del(&entry->list);
2905         kfree(entry);
2906
2907         return 0;
2908 }
2909
2910 /* This function requires the caller holds hdev->lock */
2911 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2912                                                bdaddr_t *addr, u8 addr_type)
2913 {
2914         struct hci_conn_params *params;
2915
2916         list_for_each_entry(params, &hdev->le_conn_params, list) {
2917                 if (bacmp(&params->addr, addr) == 0 &&
2918                     params->addr_type == addr_type) {
2919                         return params;
2920                 }
2921         }
2922
2923         return NULL;
2924 }
2925
2926 /* This function requires the caller holds hdev->lock */
2927 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2928                                                   bdaddr_t *addr, u8 addr_type)
2929 {
2930         struct hci_conn_params *param;
2931
2932         list_for_each_entry(param, list, action) {
2933                 if (bacmp(&param->addr, addr) == 0 &&
2934                     param->addr_type == addr_type)
2935                         return param;
2936         }
2937
2938         return NULL;
2939 }
2940
2941 /* This function requires the caller holds hdev->lock */
2942 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2943                                             bdaddr_t *addr, u8 addr_type)
2944 {
2945         struct hci_conn_params *params;
2946
2947         params = hci_conn_params_lookup(hdev, addr, addr_type);
2948         if (params)
2949                 return params;
2950
2951         params = kzalloc(sizeof(*params), GFP_KERNEL);
2952         if (!params) {
2953                 BT_ERR("Out of memory");
2954                 return NULL;
2955         }
2956
2957         bacpy(&params->addr, addr);
2958         params->addr_type = addr_type;
2959
2960         list_add(&params->list, &hdev->le_conn_params);
2961         INIT_LIST_HEAD(&params->action);
2962
2963         params->conn_min_interval = hdev->le_conn_min_interval;
2964         params->conn_max_interval = hdev->le_conn_max_interval;
2965         params->conn_latency = hdev->le_conn_latency;
2966         params->supervision_timeout = hdev->le_supv_timeout;
2967         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2968
2969         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2970
2971         return params;
2972 }
2973
2974 static void hci_conn_params_free(struct hci_conn_params *params)
2975 {
2976         if (params->conn) {
2977                 hci_conn_drop(params->conn);
2978                 hci_conn_put(params->conn);
2979         }
2980
2981         list_del(&params->action);
2982         list_del(&params->list);
2983         kfree(params);
2984 }
2985
2986 /* This function requires the caller holds hdev->lock */
2987 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2988 {
2989         struct hci_conn_params *params;
2990
2991         params = hci_conn_params_lookup(hdev, addr, addr_type);
2992         if (!params)
2993                 return;
2994
2995         hci_conn_params_free(params);
2996
2997         hci_update_background_scan(hdev);
2998
2999         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3000 }
3001
3002 /* This function requires the caller holds hdev->lock */
3003 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3004 {
3005         struct hci_conn_params *params, *tmp;
3006
3007         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3008                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3009                         continue;
3010
3011                 /* If trying to establish a one-time connection to a
3012                  * disabled device, leave the params, but mark them as one-time only.
3013                  */
3014                 if (params->explicit_connect) {
3015                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3016                         continue;
3017                 }
3018
3019                 list_del(&params->list);
3020                 kfree(params);
3021         }
3022
3023         BT_DBG("All LE disabled connection parameters were removed");
3024 }
3025
3026 /* This function requires the caller holds hdev->lock */
3027 void hci_conn_params_clear_all(struct hci_dev *hdev)
3028 {
3029         struct hci_conn_params *params, *tmp;
3030
3031         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3032                 hci_conn_params_free(params);
3033
3034         hci_update_background_scan(hdev);
3035
3036         BT_DBG("All LE connection parameters were removed");
3037 }
3038
3039 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3040 {
3041         if (status) {
3042                 BT_ERR("Failed to start inquiry: status %d", status);
3043
3044                 hci_dev_lock(hdev);
3045                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3046                 hci_dev_unlock(hdev);
3047                 return;
3048         }
3049 }
3050
3051 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3052                                           u16 opcode)
3053 {
3054         /* General inquiry access code (GIAC), 0x9E8B33 in little-endian byte order */
3055         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3056         struct hci_cp_inquiry cp;
3057         int err;
3058
3059         if (status) {
3060                 BT_ERR("Failed to disable LE scanning: status %d", status);
3061                 return;
3062         }
3063
3064         hdev->discovery.scan_start = 0;
3065
3066         switch (hdev->discovery.type) {
3067         case DISCOV_TYPE_LE:
3068                 hci_dev_lock(hdev);
3069                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3070                 hci_dev_unlock(hdev);
3071                 break;
3072
3073         case DISCOV_TYPE_INTERLEAVED:
3074                 hci_dev_lock(hdev);
3075
3076                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3077                              &hdev->quirks)) {
3078                         /* If we were running LE only scan, change discovery
3079                          * state. If we were running both LE and BR/EDR inquiry
3080                          * simultaneously, and BR/EDR inquiry is already
3081                          * finished, stop discovery, otherwise BR/EDR inquiry
3082                          * will stop discovery when finished. If we will resolve
3083                          * will stop discovery when finished. If we are resolving
3084                          * a remote device name, do not change the discovery state.
3085                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3086                             hdev->discovery.state != DISCOVERY_RESOLVING)
3087                                 hci_discovery_set_state(hdev,
3088                                                         DISCOVERY_STOPPED);
3089                 } else {
3090                         struct hci_request req;
3091
3092                         hci_inquiry_cache_flush(hdev);
3093
3094                         hci_req_init(&req, hdev);
3095
3096                         memset(&cp, 0, sizeof(cp));
3097                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3098                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3099                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3100
3101                         err = hci_req_run(&req, inquiry_complete);
3102                         if (err) {
3103                                 BT_ERR("Inquiry request failed: err %d", err);
3104                                 hci_discovery_set_state(hdev,
3105                                                         DISCOVERY_STOPPED);
3106                         }
3107                 }
3108
3109                 hci_dev_unlock(hdev);
3110                 break;
3111         }
3112 }
3113
3114 static void le_scan_disable_work(struct work_struct *work)
3115 {
3116         struct hci_dev *hdev = container_of(work, struct hci_dev,
3117                                             le_scan_disable.work);
3118         struct hci_request req;
3119         int err;
3120
3121         BT_DBG("%s", hdev->name);
3122
3123         cancel_delayed_work_sync(&hdev->le_scan_restart);
3124
3125         hci_req_init(&req, hdev);
3126
3127         hci_req_add_le_scan_disable(&req);
3128
3129         err = hci_req_run(&req, le_scan_disable_work_complete);
3130         if (err)
3131                 BT_ERR("Disable LE scanning request failed: err %d", err);
3132 }
3133
3134 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3135                                           u16 opcode)
3136 {
3137         unsigned long timeout, duration, scan_start, now;
3138
3139         BT_DBG("%s", hdev->name);
3140
3141         if (status) {
3142                 BT_ERR("Failed to restart LE scan: status %d", status);
3143                 return;
3144         }
3145
3146         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3147             !hdev->discovery.scan_start)
3148                 return;
3149
3150         /* When the scan was started, hdev->le_scan_disable was queued to
3151          * run at scan_start + duration. That work was canceled during the
3152          * scan restart, so queue it again with the remaining timeout to
3153          * make sure the scan does not run indefinitely.
3154          */
3155         duration = hdev->discovery.scan_duration;
3156         scan_start = hdev->discovery.scan_start;
3157         now = jiffies;
3158         if (now - scan_start <= duration) {
3159                 int elapsed;
3160
3161                 if (now >= scan_start)
3162                         elapsed = now - scan_start;
3163                 else
3164                         elapsed = ULONG_MAX - scan_start + now;
3165
3166                 timeout = duration - elapsed;
3167         } else {
3168                 timeout = 0;
3169         }
3170         queue_delayed_work(hdev->workqueue,
3171                            &hdev->le_scan_disable, timeout);
3172 }
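
/* Worked example (assuming HZ = 1000): with a scan duration of
 * 10240 jiffies (10.24 s) and 4000 jiffies elapsed since scan_start,
 * the disable work above is re-queued with a timeout of
 * 10240 - 4000 = 6240 jiffies, so the total scan time still adds up
 * to the original duration. The else branch handles the case where
 * the jiffies counter wrapped around between scan_start and now.
 */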
3173
3174 static void le_scan_restart_work(struct work_struct *work)
3175 {
3176         struct hci_dev *hdev = container_of(work, struct hci_dev,
3177                                             le_scan_restart.work);
3178         struct hci_request req;
3179         struct hci_cp_le_set_scan_enable cp;
3180         int err;
3181
3182         BT_DBG("%s", hdev->name);
3183
3184         /* If the controller is not scanning, we are done. */
3185         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3186                 return;
3187
3188         hci_req_init(&req, hdev);
3189
3190         hci_req_add_le_scan_disable(&req);
3191
3192         memset(&cp, 0, sizeof(cp));
3193         cp.enable = LE_SCAN_ENABLE;
3194         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3195         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3196
3197         err = hci_req_run(&req, le_scan_restart_work_complete);
3198         if (err)
3199                 BT_ERR("Restart LE scan request failed: err %d", err);
3200 }
3201
3202 /* Copy the Identity Address of the controller.
3203  *
3204  * If the controller has a public BD_ADDR, then by default use that one.
3205  * If this is a LE only controller without a public address, default to
3206  * If this is an LE-only controller without a public address, default to
3207  *
3208  * For debugging purposes it is possible to force controllers with a
3209  * public address to use the static random address instead.
3210  *
3211  * In case BR/EDR has been disabled on a dual-mode controller and
3212  * userspace has configured a static address, then that address
3213  * becomes the identity address instead of the public BR/EDR address.
3214  */
3215 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3216                                u8 *bdaddr_type)
3217 {
3218         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3219             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3220             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3221              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3222                 bacpy(bdaddr, &hdev->static_addr);
3223                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3224         } else {
3225                 bacpy(bdaddr, &hdev->bdaddr);
3226                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3227         }
3228 }
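
/* Illustrative sketch (not part of the original file): a caller
 * fetching the identity address, e.g. while building advertising
 * parameters.
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *	(id_addr_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM)
 */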
3229
3230 /* Alloc HCI device */
3231 struct hci_dev *hci_alloc_dev(void)
3232 {
3233         struct hci_dev *hdev;
3234
3235         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3236         if (!hdev)
3237                 return NULL;
3238
3239         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3240         hdev->esco_type = (ESCO_HV1);
3241         hdev->link_mode = (HCI_LM_ACCEPT);
3242         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3243         hdev->io_capability = 0x03;     /* No Input No Output */
3244         hdev->manufacturer = 0xffff;    /* Default to internal use */
3245         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3246         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3247         hdev->adv_instance_cnt = 0;
3248         hdev->cur_adv_instance = 0x00;
3249         hdev->adv_instance_timeout = 0;
3250
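        /* The intervals below use Bluetooth baseband units: sniff, scan
         * and advertising intervals are in 0.625 ms slots (e.g. 800 slots
         * = 500 ms, 0x0800 = 1.28 s), connection intervals are in 1.25 ms
         * units and the supervision timeout is in 10 ms units.
         */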
3251         hdev->sniff_max_interval = 800;
3252         hdev->sniff_min_interval = 80;
3253
3254         hdev->le_adv_channel_map = 0x07;
3255         hdev->le_adv_min_interval = 0x0800;
3256         hdev->le_adv_max_interval = 0x0800;
3257         hdev->le_scan_interval = 0x0060;
3258         hdev->le_scan_window = 0x0030;
3259         hdev->le_conn_min_interval = 0x0028;
3260         hdev->le_conn_max_interval = 0x0038;
3261         hdev->le_conn_latency = 0x0000;
3262         hdev->le_supv_timeout = 0x002a;
3263         hdev->le_def_tx_len = 0x001b;
3264         hdev->le_def_tx_time = 0x0148;
3265         hdev->le_max_tx_len = 0x001b;
3266         hdev->le_max_tx_time = 0x0148;
3267         hdev->le_max_rx_len = 0x001b;
3268         hdev->le_max_rx_time = 0x0148;
3269
3270         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3271         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3272         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3273         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3274
3275         mutex_init(&hdev->lock);
3276         mutex_init(&hdev->req_lock);
3277
3278         INIT_LIST_HEAD(&hdev->mgmt_pending);
3279         INIT_LIST_HEAD(&hdev->blacklist);
3280         INIT_LIST_HEAD(&hdev->whitelist);
3281         INIT_LIST_HEAD(&hdev->uuids);
3282         INIT_LIST_HEAD(&hdev->link_keys);
3283         INIT_LIST_HEAD(&hdev->long_term_keys);
3284         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3285         INIT_LIST_HEAD(&hdev->remote_oob_data);
3286         INIT_LIST_HEAD(&hdev->le_white_list);
3287         INIT_LIST_HEAD(&hdev->le_conn_params);
3288         INIT_LIST_HEAD(&hdev->pend_le_conns);
3289         INIT_LIST_HEAD(&hdev->pend_le_reports);
3290         INIT_LIST_HEAD(&hdev->conn_hash.list);
3291         INIT_LIST_HEAD(&hdev->adv_instances);
3292
3293         INIT_WORK(&hdev->rx_work, hci_rx_work);
3294         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3295         INIT_WORK(&hdev->tx_work, hci_tx_work);
3296         INIT_WORK(&hdev->power_on, hci_power_on);
3297         INIT_WORK(&hdev->error_reset, hci_error_reset);
3298
3299         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3300         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3301         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3302         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3303         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3304
3305         skb_queue_head_init(&hdev->rx_q);
3306         skb_queue_head_init(&hdev->cmd_q);
3307         skb_queue_head_init(&hdev->raw_q);
3308
3309         init_waitqueue_head(&hdev->req_wait_q);
3310
3311         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3312
3313         hci_init_sysfs(hdev);
3314         discovery_init(hdev);
3315
3316         return hdev;
3317 }
3318 EXPORT_SYMBOL(hci_alloc_dev);
3319
3320 /* Free HCI device */
3321 void hci_free_dev(struct hci_dev *hdev)
3322 {
3323         /* will be freed via the device release callback */
3324         put_device(&hdev->dev);
3325 }
3326 EXPORT_SYMBOL(hci_free_dev);
3327
3328 /* Register HCI device */
3329 int hci_register_dev(struct hci_dev *hdev)
3330 {
3331         int id, error;
3332
3333         if (!hdev->open || !hdev->close || !hdev->send)
3334                 return -EINVAL;
3335
3336         /* Do not allow HCI_AMP devices to register at index 0,
3337          * so the index can be used as the AMP controller ID.
3338          */
3339         switch (hdev->dev_type) {
3340         case HCI_BREDR:
3341                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3342                 break;
3343         case HCI_AMP:
3344                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3345                 break;
3346         default:
3347                 return -EINVAL;
3348         }
3349
3350         if (id < 0)
3351                 return id;
3352
3353         sprintf(hdev->name, "hci%d", id);
3354         hdev->id = id;
3355
3356         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3357
3358         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3359                                           WQ_MEM_RECLAIM, 1, hdev->name);
3360         if (!hdev->workqueue) {
3361                 error = -ENOMEM;
3362                 goto err;
3363         }
3364
3365         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3366                                               WQ_MEM_RECLAIM, 1, hdev->name);
3367         if (!hdev->req_workqueue) {
3368                 destroy_workqueue(hdev->workqueue);
3369                 error = -ENOMEM;
3370                 goto err;
3371         }
3372
3373         if (!IS_ERR_OR_NULL(bt_debugfs))
3374                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3375
3376         dev_set_name(&hdev->dev, "%s", hdev->name);
3377
3378         error = device_add(&hdev->dev);
3379         if (error < 0)
3380                 goto err_wqueue;
3381
3382         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3383                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3384                                     hdev);
3385         if (hdev->rfkill) {
3386                 if (rfkill_register(hdev->rfkill) < 0) {
3387                         rfkill_destroy(hdev->rfkill);
3388                         hdev->rfkill = NULL;
3389                 }
3390         }
3391
3392         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3393                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3394
3395         hci_dev_set_flag(hdev, HCI_SETUP);
3396         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3397
3398         if (hdev->dev_type == HCI_BREDR) {
3399                 /* Assume BR/EDR support until proven otherwise (such as
3400                  * through reading the supported features during init).
3401                  */
3402                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3403         }
3404
3405         write_lock(&hci_dev_list_lock);
3406         list_add(&hdev->list, &hci_dev_list);
3407         write_unlock(&hci_dev_list_lock);
3408
3409         /* Devices that are marked for raw-only usage are unconfigured
3410          * and should not be included in normal operation.
3411          */
3412         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3413                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3414
3415         hci_sock_dev_event(hdev, HCI_DEV_REG);
3416         hci_dev_hold(hdev);
3417
3418         queue_work(hdev->req_workqueue, &hdev->power_on);
3419
3420         return id;
3421
3422 err_wqueue:
3423         destroy_workqueue(hdev->workqueue);
3424         destroy_workqueue(hdev->req_workqueue);
3425 err:
3426         ida_simple_remove(&hci_index_ida, hdev->id);
3427
3428         return error;
3429 }
3430 EXPORT_SYMBOL(hci_register_dev);
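
/* Illustrative sketch (not from this file): the typical driver-side
 * usage. my_open, my_close and my_send are hypothetical driver
 * callbacks; hci_register_dev() rejects a device without them.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */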
3431
3432 /* Unregister HCI device */
3433 void hci_unregister_dev(struct hci_dev *hdev)
3434 {
3435         int id;
3436
3437         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3438
3439         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3440
3441         id = hdev->id;
3442
3443         write_lock(&hci_dev_list_lock);
3444         list_del(&hdev->list);
3445         write_unlock(&hci_dev_list_lock);
3446
3447         hci_dev_do_close(hdev);
3448
3449         cancel_work_sync(&hdev->power_on);
3450
3451         if (!test_bit(HCI_INIT, &hdev->flags) &&
3452             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3453             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3454                 hci_dev_lock(hdev);
3455                 mgmt_index_removed(hdev);
3456                 hci_dev_unlock(hdev);
3457         }
3458
3459         /* mgmt_index_removed should take care of emptying the
3460          * pending list. */
3461         BUG_ON(!list_empty(&hdev->mgmt_pending));
3462
3463         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3464
3465         if (hdev->rfkill) {
3466                 rfkill_unregister(hdev->rfkill);
3467                 rfkill_destroy(hdev->rfkill);
3468         }
3469
3470         device_del(&hdev->dev);
3471
3472         debugfs_remove_recursive(hdev->debugfs);
3473
3474         destroy_workqueue(hdev->workqueue);
3475         destroy_workqueue(hdev->req_workqueue);
3476
3477         hci_dev_lock(hdev);
3478         hci_bdaddr_list_clear(&hdev->blacklist);
3479         hci_bdaddr_list_clear(&hdev->whitelist);
3480         hci_uuids_clear(hdev);
3481         hci_link_keys_clear(hdev);
3482         hci_smp_ltks_clear(hdev);
3483         hci_smp_irks_clear(hdev);
3484         hci_remote_oob_data_clear(hdev);
3485         hci_adv_instances_clear(hdev);
3486         hci_bdaddr_list_clear(&hdev->le_white_list);
3487         hci_conn_params_clear_all(hdev);
3488         hci_discovery_filter_clear(hdev);
3489         hci_dev_unlock(hdev);
3490
3491         hci_dev_put(hdev);
3492
3493         ida_simple_remove(&hci_index_ida, id);
3494 }
3495 EXPORT_SYMBOL(hci_unregister_dev);
3496
3497 /* Suspend HCI device */
3498 int hci_suspend_dev(struct hci_dev *hdev)
3499 {
3500         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3501         return 0;
3502 }
3503 EXPORT_SYMBOL(hci_suspend_dev);
3504
3505 /* Resume HCI device */
3506 int hci_resume_dev(struct hci_dev *hdev)
3507 {
3508         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3509         return 0;
3510 }
3511 EXPORT_SYMBOL(hci_resume_dev);
3512
3513 /* Reset HCI device */
3514 int hci_reset_dev(struct hci_dev *hdev)
3515 {
3516         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
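        /* These three bytes form a complete HCI event packet: event code
         * HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01 and a
         * hardware code of 0x00.
         */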
3517         struct sk_buff *skb;
3518
3519         skb = bt_skb_alloc(3, GFP_ATOMIC);
3520         if (!skb)
3521                 return -ENOMEM;
3522
3523         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3524         memcpy(skb_put(skb, 3), hw_err, 3);
3525
3526         /* Send Hardware Error to upper stack */
3527         return hci_recv_frame(hdev, skb);
3528 }
3529 EXPORT_SYMBOL(hci_reset_dev);
3530
3531 /* Receive frame from HCI drivers */
3532 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3533 {
3534         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3535                       !test_bit(HCI_INIT, &hdev->flags))) {
3536                 kfree_skb(skb);
3537                 return -ENXIO;
3538         }
3539
3540         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3541             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3542             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3543                 kfree_skb(skb);
3544                 return -EINVAL;
3545         }
3546
3547         /* Incoming skb */
3548         bt_cb(skb)->incoming = 1;
3549
3550         /* Time stamp */
3551         __net_timestamp(skb);
3552
3553         skb_queue_tail(&hdev->rx_q, skb);
3554         queue_work(hdev->workqueue, &hdev->rx_work);
3555
3556         return 0;
3557 }
3558 EXPORT_SYMBOL(hci_recv_frame);
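
/* Illustrative sketch: a driver handing a received event packet to
 * the core. Note that hci_recv_frame() consumes the skb even on
 * error, so the caller must not free it afterwards.
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */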
3559
3560 /* Receive diagnostic message from HCI drivers */
3561 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3562 {
3563         /* Mark as diagnostic packet */
3564         bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3565
3566         /* Time stamp */
3567         __net_timestamp(skb);
3568
3569         skb_queue_tail(&hdev->rx_q, skb);
3570         queue_work(hdev->workqueue, &hdev->rx_work);
3571
3572         return 0;
3573 }
3574 EXPORT_SYMBOL(hci_recv_diag);
3575
3576 /* ---- Interface to upper protocols ---- */
3577
3578 int hci_register_cb(struct hci_cb *cb)
3579 {
3580         BT_DBG("%p name %s", cb, cb->name);
3581
3582         mutex_lock(&hci_cb_list_lock);
3583         list_add_tail(&cb->list, &hci_cb_list);
3584         mutex_unlock(&hci_cb_list_lock);
3585
3586         return 0;
3587 }
3588 EXPORT_SYMBOL(hci_register_cb);
3589
3590 int hci_unregister_cb(struct hci_cb *cb)
3591 {
3592         BT_DBG("%p name %s", cb, cb->name);
3593
3594         mutex_lock(&hci_cb_list_lock);
3595         list_del(&cb->list);
3596         mutex_unlock(&hci_cb_list_lock);
3597
3598         return 0;
3599 }
3600 EXPORT_SYMBOL(hci_unregister_cb);
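
/* Illustrative sketch: how an upper protocol hooks into connection
 * events. my_connect_cfm is a hypothetical callback.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 */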
3601
3602 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3603 {
3604         int err;
3605
3606         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3607
3608         /* Time stamp */
3609         __net_timestamp(skb);
3610
3611         /* Send copy to monitor */
3612         hci_send_to_monitor(hdev, skb);
3613
3614         if (atomic_read(&hdev->promisc)) {
3615                 /* Send copy to the sockets */
3616                 hci_send_to_sock(hdev, skb);
3617         }
3618
3619         /* Get rid of skb owner, prior to sending to the driver. */
3620         skb_orphan(skb);
3621
3622         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3623                 kfree_skb(skb);
3624                 return;
3625         }
3626
3627         err = hdev->send(hdev, skb);
3628         if (err < 0) {
3629                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3630                 kfree_skb(skb);
3631         }
3632 }
3633
3634 /* Send HCI command */
3635 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3636                  const void *param)
3637 {
3638         struct sk_buff *skb;
3639
3640         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3641
3642         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3643         if (!skb) {
3644                 BT_ERR("%s no memory for command", hdev->name);
3645                 return -ENOMEM;
3646         }
3647
3648         /* Stand-alone HCI commands must be flagged as
3649          * single-command requests.
3650          */
3651         bt_cb(skb)->hci.req_start = true;
3652
3653         skb_queue_tail(&hdev->cmd_q, skb);
3654         queue_work(hdev->workqueue, &hdev->cmd_work);
3655
3656         return 0;
3657 }
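
/* Illustrative sketch: queueing a stand-alone Reset command; the
 * completion is reported asynchronously via the event path.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */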
3658
3659 /* Get data from the previously sent command */
3660 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3661 {
3662         struct hci_command_hdr *hdr;
3663
3664         if (!hdev->sent_cmd)
3665                 return NULL;
3666
3667         hdr = (void *) hdev->sent_cmd->data;
3668
3669         if (hdr->opcode != cpu_to_le16(opcode))
3670                 return NULL;
3671
3672         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3673
3674         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3675 }
3676
3677 /* Send HCI command and wait for command complete event */
3678 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3679                              const void *param, u32 timeout)
3680 {
3681         struct sk_buff *skb;
3682
3683         if (!test_bit(HCI_UP, &hdev->flags))
3684                 return ERR_PTR(-ENETDOWN);
3685
3686         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3687
3688         hci_req_lock(hdev);
3689         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3690         hci_req_unlock(hdev);
3691
3692         return skb;
3693 }
3694 EXPORT_SYMBOL(hci_cmd_sync);
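
/* Illustrative sketch: synchronously reading the local version
 * information and releasing the returned event skb.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */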
3695
3696 /* Send ACL data */
3697 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3698 {
3699         struct hci_acl_hdr *hdr;
3700         int len = skb->len;
3701
3702         skb_push(skb, HCI_ACL_HDR_SIZE);
3703         skb_reset_transport_header(skb);
3704         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3705         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3706         hdr->dlen   = cpu_to_le16(len);
3707 }
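
/* hci_handle_pack() above folds the 12-bit connection handle and the
 * 4 bits of packet-boundary/broadcast flags into the single 16-bit
 * handle field of the ACL header.
 */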
3708
3709 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3710                           struct sk_buff *skb, __u16 flags)
3711 {
3712         struct hci_conn *conn = chan->conn;
3713         struct hci_dev *hdev = conn->hdev;
3714         struct sk_buff *list;
3715
3716         skb->len = skb_headlen(skb);
3717         skb->data_len = 0;
3718
3719         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3720
3721         switch (hdev->dev_type) {
3722         case HCI_BREDR:
3723                 hci_add_acl_hdr(skb, conn->handle, flags);
3724                 break;
3725         case HCI_AMP:
3726                 hci_add_acl_hdr(skb, chan->handle, flags);
3727                 break;
3728         default:
3729                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3730                 return;
3731         }
3732
3733         list = skb_shinfo(skb)->frag_list;
3734         if (!list) {
3735                 /* Non-fragmented */
3736                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3737
3738                 skb_queue_tail(queue, skb);
3739         } else {
3740                 /* Fragmented */
3741                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3742
3743                 skb_shinfo(skb)->frag_list = NULL;
3744
3745                 /* Queue all fragments atomically. We need to use spin_lock_bh
3746                  * here because with 6LoWPAN links this function can be
3747                  * called from softirq context, where taking a plain spin
3748                  * lock could cause deadlocks.
3749                  */
3750                 spin_lock_bh(&queue->lock);
3751
3752                 __skb_queue_tail(queue, skb);
3753
3754                 flags &= ~ACL_START;
3755                 flags |= ACL_CONT;
3756                 do {
3757                         skb = list; list = list->next;
3758
3759                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3760                         hci_add_acl_hdr(skb, conn->handle, flags);
3761
3762                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3763
3764                         __skb_queue_tail(queue, skb);
3765                 } while (list);
3766
3767                 spin_unlock_bh(&queue->lock);
3768         }
3769 }
3770
3771 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3772 {
3773         struct hci_dev *hdev = chan->conn->hdev;
3774
3775         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3776
3777         hci_queue_acl(chan, &chan->data_q, skb, flags);
3778
3779         queue_work(hdev->workqueue, &hdev->tx_work);
3780 }
3781
3782 /* Send SCO data */
3783 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3784 {
3785         struct hci_dev *hdev = conn->hdev;
3786         struct hci_sco_hdr hdr;
3787
3788         BT_DBG("%s len %d", hdev->name, skb->len);
3789
3790         hdr.handle = cpu_to_le16(conn->handle);
3791         hdr.dlen   = skb->len;
3792
3793         skb_push(skb, HCI_SCO_HDR_SIZE);
3794         skb_reset_transport_header(skb);
3795         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3796
3797         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3798
3799         skb_queue_tail(&conn->data_q, skb);
3800         queue_work(hdev->workqueue, &hdev->tx_work);
3801 }
3802
3803 /* ---- HCI TX task (outgoing data) ---- */
3804
3805 /* HCI Connection scheduler */
3806 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3807                                      int *quote)
3808 {
3809         struct hci_conn_hash *h = &hdev->conn_hash;
3810         struct hci_conn *conn = NULL, *c;
3811         unsigned int num = 0, min = ~0;
3812
3813         /* We don't have to lock the device here. Connections are always
3814          * added and removed with the TX task disabled. */
3815
3816         rcu_read_lock();
3817
3818         list_for_each_entry_rcu(c, &h->list, list) {
3819                 if (c->type != type || skb_queue_empty(&c->data_q))
3820                         continue;
3821
3822                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3823                         continue;
3824
3825                 num++;
3826
3827                 if (c->sent < min) {
3828                         min  = c->sent;
3829                         conn = c;
3830                 }
3831
3832                 if (hci_conn_num(hdev, type) == num)
3833                         break;
3834         }
3835
3836         rcu_read_unlock();
3837
3838         if (conn) {
3839                 int cnt, q;
3840
3841                 switch (conn->type) {
3842                 case ACL_LINK:
3843                         cnt = hdev->acl_cnt;
3844                         break;
3845                 case SCO_LINK:
3846                 case ESCO_LINK:
3847                         cnt = hdev->sco_cnt;
3848                         break;
3849                 case LE_LINK:
3850                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3851                         break;
3852                 default:
3853                         cnt = 0;
3854                         BT_ERR("Unknown link type");
3855                 }
3856
3857                 q = cnt / num;
3858                 *quote = q ? q : 1;
3859         } else
3860                 *quote = 0;
3861
3862         BT_DBG("conn %p quote %d", conn, *quote);
3863         return conn;
3864 }
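
/* Quota example for hci_low_sent() above: with 8 free ACL buffers
 * (cnt = 8) shared by 3 connections that have queued data, the
 * connection with the fewest outstanding packets is picked and
 * granted *quote = 8 / 3 = 2 packets before the scheduler
 * re-evaluates.
 */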
3865
3866 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3867 {
3868         struct hci_conn_hash *h = &hdev->conn_hash;
3869         struct hci_conn *c;
3870
3871         BT_ERR("%s link tx timeout", hdev->name);
3872
3873         rcu_read_lock();
3874
3875         /* Kill stalled connections */
3876         list_for_each_entry_rcu(c, &h->list, list) {
3877                 if (c->type == type && c->sent) {
3878                         BT_ERR("%s killing stalled connection %pMR",
3879                                hdev->name, &c->dst);
3880                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3881                 }
3882         }
3883
3884         rcu_read_unlock();
3885 }
3886
3887 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3888                                       int *quote)
3889 {
3890         struct hci_conn_hash *h = &hdev->conn_hash;
3891         struct hci_chan *chan = NULL;
3892         unsigned int num = 0, min = ~0, cur_prio = 0;
3893         struct hci_conn *conn;
3894         int cnt, q, conn_num = 0;
3895
3896         BT_DBG("%s", hdev->name);
3897
3898         rcu_read_lock();
3899
3900         list_for_each_entry_rcu(conn, &h->list, list) {
3901                 struct hci_chan *tmp;
3902
3903                 if (conn->type != type)
3904                         continue;
3905
3906                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3907                         continue;
3908
3909                 conn_num++;
3910
3911                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3912                         struct sk_buff *skb;
3913
3914                         if (skb_queue_empty(&tmp->data_q))
3915                                 continue;
3916
3917                         skb = skb_peek(&tmp->data_q);
3918                         if (skb->priority < cur_prio)
3919                                 continue;
3920
3921                         if (skb->priority > cur_prio) {
3922                                 num = 0;
3923                                 min = ~0;
3924                                 cur_prio = skb->priority;
3925                         }
3926
3927                         num++;
3928
3929                         if (conn->sent < min) {
3930                                 min  = conn->sent;
3931                                 chan = tmp;
3932                         }
3933                 }
3934
3935                 if (hci_conn_num(hdev, type) == conn_num)
3936                         break;
3937         }
3938
3939         rcu_read_unlock();
3940
3941         if (!chan)
3942                 return NULL;
3943
3944         switch (chan->conn->type) {
3945         case ACL_LINK:
3946                 cnt = hdev->acl_cnt;
3947                 break;
3948         case AMP_LINK:
3949                 cnt = hdev->block_cnt;
3950                 break;
3951         case SCO_LINK:
3952         case ESCO_LINK:
3953                 cnt = hdev->sco_cnt;
3954                 break;
3955         case LE_LINK:
3956                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3957                 break;
3958         default:
3959                 cnt = 0;
3960                 BT_ERR("Unknown link type");
3961         }
3962
3963         q = cnt / num;
3964         *quote = q ? q : 1;
3965         BT_DBG("chan %p quote %d", chan, *quote);
3966         return chan;
3967 }
3968
3969 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3970 {
3971         struct hci_conn_hash *h = &hdev->conn_hash;
3972         struct hci_conn *conn;
3973         int num = 0;
3974
3975         BT_DBG("%s", hdev->name);
3976
3977         rcu_read_lock();
3978
3979         list_for_each_entry_rcu(conn, &h->list, list) {
3980                 struct hci_chan *chan;
3981
3982                 if (conn->type != type)
3983                         continue;
3984
3985                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3986                         continue;
3987
3988                 num++;
3989
3990                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3991                         struct sk_buff *skb;
3992
3993                         if (chan->sent) {
3994                                 chan->sent = 0;
3995                                 continue;
3996                         }
3997
3998                         if (skb_queue_empty(&chan->data_q))
3999                                 continue;
4000
4001                         skb = skb_peek(&chan->data_q);
4002                         if (skb->priority >= HCI_PRIO_MAX - 1)
4003                                 continue;
4004
4005                         skb->priority = HCI_PRIO_MAX - 1;
4006
4007                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4008                                skb->priority);
4009                 }
4010
4011                 if (hci_conn_num(hdev, type) == num)
4012                         break;
4013         }
4014
4015         rcu_read_unlock();
4016
4017 }
4018
4019 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4020 {
4021         /* Calculate the number of blocks used by this packet */
4022         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4023 }
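
/* Example: with hdev->block_len = 64, a 340-byte ACL frame (4-byte
 * ACL header + 336 bytes of payload) consumes
 * DIV_ROUND_UP(340 - 4, 64) = 6 controller buffer blocks.
 */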
4024
4025 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4026 {
4027         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4028                 /* The ACL tx timeout must be longer than the maximum
4029                  * link supervision timeout (40.9 seconds). */
4030                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4031                                        HCI_ACL_TX_TIMEOUT))
4032                         hci_link_tx_to(hdev, ACL_LINK);
4033         }
4034 }
4035
4036 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4037 {
4038         unsigned int cnt = hdev->acl_cnt;
4039         struct hci_chan *chan;
4040         struct sk_buff *skb;
4041         int quote;
4042
4043         __check_timeout(hdev, cnt);
4044
4045         while (hdev->acl_cnt &&
4046                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4047                 u32 priority = (skb_peek(&chan->data_q))->priority;
4048                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4049                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4050                                skb->len, skb->priority);
4051
4052                         /* Stop if priority has changed */
4053                         if (skb->priority < priority)
4054                                 break;
4055
4056                         skb = skb_dequeue(&chan->data_q);
4057
4058                         hci_conn_enter_active_mode(chan->conn,
4059                                                    bt_cb(skb)->force_active);
4060
4061                         hci_send_frame(hdev, skb);
4062                         hdev->acl_last_tx = jiffies;
4063
4064                         hdev->acl_cnt--;
4065                         chan->sent++;
4066                         chan->conn->sent++;
4067                 }
4068         }
4069
4070         if (cnt != hdev->acl_cnt)
4071                 hci_prio_recalculate(hdev, ACL_LINK);
4072 }
4073
4074 static void hci_sched_acl_blk(struct hci_dev *hdev)
4075 {
4076         unsigned int cnt = hdev->block_cnt;
4077         struct hci_chan *chan;
4078         struct sk_buff *skb;
4079         int quote;
4080         u8 type;
4081
4082         __check_timeout(hdev, cnt);
4083
4084         BT_DBG("%s", hdev->name);
4085
4086         if (hdev->dev_type == HCI_AMP)
4087                 type = AMP_LINK;
4088         else
4089                 type = ACL_LINK;
4090
4091         while (hdev->block_cnt > 0 &&
4092                (chan = hci_chan_sent(hdev, type, &quote))) {
4093                 u32 priority = (skb_peek(&chan->data_q))->priority;
4094                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4095                         int blocks;
4096
4097                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4098                                skb->len, skb->priority);
4099
4100                         /* Stop if priority has changed */
4101                         if (skb->priority < priority)
4102                                 break;
4103
4104                         skb = skb_dequeue(&chan->data_q);
4105
4106                         blocks = __get_blocks(hdev, skb);
4107                         if (blocks > hdev->block_cnt)
4108                                 return;
4109
4110                         hci_conn_enter_active_mode(chan->conn,
4111                                                    bt_cb(skb)->force_active);
4112
4113                         hci_send_frame(hdev, skb);
4114                         hdev->acl_last_tx = jiffies;
4115
4116                         hdev->block_cnt -= blocks;
4117                         quote -= blocks;
4118
4119                         chan->sent += blocks;
4120                         chan->conn->sent += blocks;
4121                 }
4122         }
4123
4124         if (cnt != hdev->block_cnt)
4125                 hci_prio_recalculate(hdev, type);
4126 }
4127
4128 static void hci_sched_acl(struct hci_dev *hdev)
4129 {
4130         BT_DBG("%s", hdev->name);
4131
4132         /* No ACL link over BR/EDR controller */
4133         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4134                 return;
4135
4136         /* No AMP link over AMP controller */
4137         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4138                 return;
4139
4140         switch (hdev->flow_ctl_mode) {
4141         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4142                 hci_sched_acl_pkt(hdev);
4143                 break;
4144
4145         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4146                 hci_sched_acl_blk(hdev);
4147                 break;
4148         }
4149 }
4150
4151 /* Schedule SCO */
4152 static void hci_sched_sco(struct hci_dev *hdev)
4153 {
4154         struct hci_conn *conn;
4155         struct sk_buff *skb;
4156         int quote;
4157
4158         BT_DBG("%s", hdev->name);
4159
4160         if (!hci_conn_num(hdev, SCO_LINK))
4161                 return;
4162
4163         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4164                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4165                         BT_DBG("skb %p len %d", skb, skb->len);
4166                         hci_send_frame(hdev, skb);
4167
4168                         conn->sent++;
4169                         if (conn->sent == ~0)
4170                                 conn->sent = 0;
4171                 }
4172         }
4173 }
4174
4175 static void hci_sched_esco(struct hci_dev *hdev)
4176 {
4177         struct hci_conn *conn;
4178         struct sk_buff *skb;
4179         int quote;
4180
4181         BT_DBG("%s", hdev->name);
4182
4183         if (!hci_conn_num(hdev, ESCO_LINK))
4184                 return;
4185
4186         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4187                                                      &quote))) {
4188                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4189                         BT_DBG("skb %p len %d", skb, skb->len);
4190                         hci_send_frame(hdev, skb);
4191
4192                         conn->sent++;
4193                         if (conn->sent == ~0)
4194                                 conn->sent = 0;
4195                 }
4196         }
4197 }
4198
4199 static void hci_sched_le(struct hci_dev *hdev)
4200 {
4201         struct hci_chan *chan;
4202         struct sk_buff *skb;
4203         int quote, cnt, tmp;
4204
4205         BT_DBG("%s", hdev->name);
4206
4207         if (!hci_conn_num(hdev, LE_LINK))
4208                 return;
4209
4210         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4211                 /* The LE tx timeout must be longer than the maximum
4212                  * link supervision timeout (40.9 seconds). */
4213                 if (!hdev->le_cnt && hdev->le_pkts &&
4214                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4215                         hci_link_tx_to(hdev, LE_LINK);
4216         }
4217
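        /* Controllers without a dedicated LE buffer pool (le_pkts == 0)
         * share the BR/EDR ACL buffers, so fall back to the ACL count.
         */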
4218         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4219         tmp = cnt;
4220         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4221                 u32 priority = (skb_peek(&chan->data_q))->priority;
4222                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4223                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4224                                skb->len, skb->priority);
4225
4226                         /* Stop if priority has changed */
4227                         if (skb->priority < priority)
4228                                 break;
4229
4230                         skb = skb_dequeue(&chan->data_q);
4231
4232                         hci_send_frame(hdev, skb);
4233                         hdev->le_last_tx = jiffies;
4234
4235                         cnt--;
4236                         chan->sent++;
4237                         chan->conn->sent++;
4238                 }
4239         }
4240
4241         if (hdev->le_pkts)
4242                 hdev->le_cnt = cnt;
4243         else
4244                 hdev->acl_cnt = cnt;
4245
4246         if (cnt != tmp)
4247                 hci_prio_recalculate(hdev, LE_LINK);
4248 }
4249
4250 static void hci_tx_work(struct work_struct *work)
4251 {
4252         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4253         struct sk_buff *skb;
4254
4255         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4256                hdev->sco_cnt, hdev->le_cnt);
4257
4258         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4259                 /* Schedule queues and send stuff to HCI driver */
4260                 hci_sched_acl(hdev);
4261                 hci_sched_sco(hdev);
4262                 hci_sched_esco(hdev);
4263                 hci_sched_le(hdev);
4264         }
4265
4266         /* Send next queued raw (unknown type) packet */
4267         while ((skb = skb_dequeue(&hdev->raw_q)))
4268                 hci_send_frame(hdev, skb);
4269 }
4270
4271 /* ----- HCI RX task (incoming data processing) ----- */
4272
4273 /* ACL data packet */
4274 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4275 {
4276         struct hci_acl_hdr *hdr = (void *) skb->data;
4277         struct hci_conn *conn;
4278         __u16 handle, flags;
4279
4280         skb_pull(skb, HCI_ACL_HDR_SIZE);
4281
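        /* The wire handle field packs a 12-bit connection handle with
         * 2-bit packet-boundary and 2-bit broadcast flags; hci_flags()
         * and hci_handle() split them apart again.
         */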
4282         handle = __le16_to_cpu(hdr->handle);
4283         flags  = hci_flags(handle);
4284         handle = hci_handle(handle);
4285
4286         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4287                handle, flags);
4288
4289         hdev->stat.acl_rx++;
4290
4291         hci_dev_lock(hdev);
4292         conn = hci_conn_hash_lookup_handle(hdev, handle);
4293         hci_dev_unlock(hdev);
4294
4295         if (conn) {
4296                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4297
4298                 /* Send to upper protocol */
4299                 l2cap_recv_acldata(conn, skb, flags);
4300                 return;
4301         } else {
4302                 BT_ERR("%s ACL packet for unknown connection handle %d",
4303                        hdev->name, handle);
4304         }
4305
4306         kfree_skb(skb);
4307 }
4308
4309 /* SCO data packet */
4310 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4311 {
4312         struct hci_sco_hdr *hdr = (void *) skb->data;
4313         struct hci_conn *conn;
4314         __u16 handle;
4315
4316         skb_pull(skb, HCI_SCO_HDR_SIZE);
4317
4318         handle = __le16_to_cpu(hdr->handle);
4319
4320         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4321
4322         hdev->stat.sco_rx++;
4323
4324         hci_dev_lock(hdev);
4325         conn = hci_conn_hash_lookup_handle(hdev, handle);
4326         hci_dev_unlock(hdev);
4327
4328         if (conn) {
4329                 /* Send to upper protocol */
4330                 sco_recv_scodata(conn, skb);
4331                 return;
4332         } else {
4333                 BT_ERR("%s SCO packet for unknown connection handle %d",
4334                        hdev->name, handle);
4335         }
4336
4337         kfree_skb(skb);
4338 }
4339
4340 static bool hci_req_is_complete(struct hci_dev *hdev)
4341 {
4342         struct sk_buff *skb;
4343
4344         skb = skb_peek(&hdev->cmd_q);
4345         if (!skb)
4346                 return true;
4347
4348         return bt_cb(skb)->hci.req_start;
4349 }
4350
4351 static void hci_resend_last(struct hci_dev *hdev)
4352 {
4353         struct hci_command_hdr *sent;
4354         struct sk_buff *skb;
4355         u16 opcode;
4356
4357         if (!hdev->sent_cmd)
4358                 return;
4359
4360         sent = (void *) hdev->sent_cmd->data;
4361         opcode = __le16_to_cpu(sent->opcode);
4362         if (opcode == HCI_OP_RESET)
4363                 return;
4364
4365         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4366         if (!skb)
4367                 return;
4368
4369         skb_queue_head(&hdev->cmd_q, skb);
4370         queue_work(hdev->workqueue, &hdev->cmd_work);
4371 }
4372
4373 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4374                           hci_req_complete_t *req_complete,
4375                           hci_req_complete_skb_t *req_complete_skb)
4376 {
4377         struct sk_buff *skb;
4378         unsigned long flags;
4379
4380         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4381
4382         /* If the completed command doesn't match the last one that was
4383          * sent, we need to do special handling of it.
4384          */
4385         if (!hci_sent_cmd_data(hdev, opcode)) {
4386                 /* Some CSR-based controllers generate a spontaneous
4387                  * reset complete event during init and any pending
4388                  * command will never be completed. In such a case we
4389                  * need to resend whatever was the last sent
4390                  * command.
4391                  */
4392                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4393                         hci_resend_last(hdev);
4394
4395                 return;
4396         }
4397
4398         /* If the command succeeded and there are still more commands in
4399          * this request, the request is not yet complete.
4400          */
4401         if (!status && !hci_req_is_complete(hdev))
4402                 return;
4403
4404         /* If this was the last command in a request, the complete
4405          * callback would be found in hdev->sent_cmd instead of the
4406          * command queue (hdev->cmd_q).
4407          */
4408         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4409                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4410                 return;
4411         }
4412
4413         if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4414                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4415                 return;
4416         }
4417
4418         /* Remove all pending commands belonging to this request */
4419         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4420         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4421                 if (bt_cb(skb)->hci.req_start) {
4422                         __skb_queue_head(&hdev->cmd_q, skb);
4423                         break;
4424                 }
4425
4426                 *req_complete = bt_cb(skb)->hci.req_complete;
4427                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4428                 kfree_skb(skb);
4429         }
4430         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4431 }
4432
4433 static void hci_rx_work(struct work_struct *work)
4434 {
4435         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4436         struct sk_buff *skb;
4437
4438         BT_DBG("%s", hdev->name);
4439
4440         while ((skb = skb_dequeue(&hdev->rx_q))) {
4441                 /* Send copy to monitor */
4442                 hci_send_to_monitor(hdev, skb);
4443
4444                 if (atomic_read(&hdev->promisc)) {
4445                         /* Send copy to the sockets */
4446                         hci_send_to_sock(hdev, skb);
4447                 }
4448
4449                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4450                         kfree_skb(skb);
4451                         continue;
4452                 }
4453
4454                 if (test_bit(HCI_INIT, &hdev->flags)) {
4455                         /* Don't process data packets in this state. */
4456                         switch (bt_cb(skb)->pkt_type) {
4457                         case HCI_ACLDATA_PKT:
4458                         case HCI_SCODATA_PKT:
4459                                 kfree_skb(skb);
4460                                 continue;
4461                         }
4462                 }
4463
4464                 /* Process frame */
4465                 switch (bt_cb(skb)->pkt_type) {
4466                 case HCI_EVENT_PKT:
4467                         BT_DBG("%s Event packet", hdev->name);
4468                         hci_event_packet(hdev, skb);
4469                         break;
4470
4471                 case HCI_ACLDATA_PKT:
4472                         BT_DBG("%s ACL data packet", hdev->name);
4473                         hci_acldata_packet(hdev, skb);
4474                         break;
4475
4476                 case HCI_SCODATA_PKT:
4477                         BT_DBG("%s SCO data packet", hdev->name);
4478                         hci_scodata_packet(hdev, skb);
4479                         break;
4480
4481                 default:
4482                         kfree_skb(skb);
4483                         break;
4484                 }
4485         }
4486 }
4487
4488 static void hci_cmd_work(struct work_struct *work)
4489 {
4490         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4491         struct sk_buff *skb;
4492
4493         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4494                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4495
4496         /* Send queued commands */
4497         if (atomic_read(&hdev->cmd_cnt)) {
4498                 skb = skb_dequeue(&hdev->cmd_q);
4499                 if (!skb)
4500                         return;
4501
4502                 kfree_skb(hdev->sent_cmd);
4503
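                /* Keep a clone of the command so that a later Command
                 * Complete/Status event can be matched against it (see
                 * hci_sent_cmd_data() and hci_req_cmd_complete()).
                 */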
4504                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4505                 if (hdev->sent_cmd) {
4506                         atomic_dec(&hdev->cmd_cnt);
4507                         hci_send_frame(hdev, skb);
4508                         if (test_bit(HCI_RESET, &hdev->flags))
4509                                 cancel_delayed_work(&hdev->cmd_timer);
4510                         else
4511                                 schedule_delayed_work(&hdev->cmd_timer,
4512                                                       HCI_CMD_TIMEOUT);
4513                 } else {
4514                         skb_queue_head(&hdev->cmd_q, skb);
4515                         queue_work(hdev->workqueue, &hdev->cmd_work);
4516                 }
4517         }
4518 }