kernel/net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

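/* Illustrative note (not part of the original file): the "dut_mode" entry
 * created from this fops lives in the per-controller debugfs directory.
 * Assuming debugfs is mounted at /sys/kernel/debug, toggling Device Under
 * Test mode from userspace would look roughly like this:
 *
 *      # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      Y
 */
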
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

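/* Illustrative sketch (not part of the original file): a driver can use the
 * exported __hci_cmd_sync() to issue a command and wait for its Command
 * Complete. The OGF/OCF pair below is a made-up vendor opcode, chosen only
 * for the example; hci_opcode_pack() and HCI_CMD_TIMEOUT come from the
 * Bluetooth headers included above.
 *
 *      struct sk_buff *skb;
 *      u8 param = 0x01;
 *
 *      skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x000f),
 *                           sizeof(param), &param, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */
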
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

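/* Illustrative note (not part of the original file): the Set Event Mask
 * parameter is a 64-bit little-endian bitfield in which bit N enables the
 * HCI event with event code N + 1 (Bluetooth Core Specification, Vol 2,
 * Part E). For example, Disconnection Complete has event code 0x05, so it
 * maps to byte 0, bit 4:
 *
 *      events[0] |= 0x10;      // (0x05 - 1) / 8 == byte 0, (0x05 - 1) % 8 == bit 4
 *
 * which matches the LE-only default set up above.
 */
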
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * max_page to a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;

                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

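/* Illustrative sketch (not part of the original file, and an assumption
 * about plumbing handled elsewhere in the Bluetooth core): these small
 * request builders are driven through hci_req_sync() by the legacy device
 * ioctls (HCISETSCAN, HCISETAUTH, HCISETENCRYPT, HCISETLINKPOL). From
 * userspace, enabling page and inquiry scan would look roughly like this;
 * dev_id 0 (hci0) is chosen only for the example:
 *
 *      struct hci_dev_req dr = {
 *              .dev_id  = 0,
 *              .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *      };
 *
 *      ioctl(hci_sock_fd, HCISETSCAN, (unsigned long) &dr);
 */
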
/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

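/* Illustrative sketch (not part of the original file): because
 * hci_dev_get() returns the device with its reference count raised via
 * hci_dev_hold(), every successful lookup must be balanced with
 * hci_dev_put() once the caller is done, e.g.:
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              BT_DBG("found %s", hdev->name);
 *              hci_dev_put(hdev);
 *      }
 */
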
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

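/* Illustrative sketch (not part of the original file): hci_inquiry() backs
 * the HCIINQUIRY ioctl. A userspace caller passes a struct hci_inquiry_req
 * followed by room for the responses; the GIAC LAP 0x9e8b33 (stored
 * little-endian) and the other parameter values are assumptions chosen for
 * the example:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } req = {
 *              .ir = {
 *                      .dev_id  = 0,
 *                      .flags   = IREQ_CACHE_FLUSH,
 *                      .lap     = { 0x33, 0x8b, 0x9e },
 *                      .length  = 8,   // inquiry length, 1.28 s units
 *                      .num_rsp = 255,
 *              },
 *      };
 *
 *      ioctl(hci_sock_fd, HCIINQUIRY, (unsigned long) &req);
 */
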
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

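/* Illustrative sketch (not part of the original file): hci_dev_open() is
 * the backend of the HCIDEVUP ioctl on a raw HCI socket, e.g.:
 *
 *      int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      ioctl(dd, HCIDEVUP, 0);         // bring up hci0
 */
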
1539 /* This function requires the caller holds hdev->lock */
1540 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1541 {
1542         struct hci_conn_params *p;
1543
1544         list_for_each_entry(p, &hdev->le_conn_params, list) {
1545                 if (p->conn) {
1546                         hci_conn_drop(p->conn);
1547                         hci_conn_put(p->conn);
1548                         p->conn = NULL;
1549                 }
1550                 list_del_init(&p->action);
1551         }
1552
1553         BT_DBG("All LE pending actions cleared");
1554 }
1555
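/* Bring the controller fully down: run the vendor shutdown hook,
 * cancel pending work and timers, flush the queues and the connection
 * hash, optionally issue an HCI reset (HCI_QUIRK_RESET_ON_CLOSE) and
 * finally invoke the driver's close callback.
 */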
1556 static int hci_dev_do_close(struct hci_dev *hdev)
1557 {
1558         BT_DBG("%s %p", hdev->name, hdev);
1559
1560         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1561             test_bit(HCI_UP, &hdev->flags)) {
1562                 /* Execute vendor specific shutdown routine */
1563                 if (hdev->shutdown)
1564                         hdev->shutdown(hdev);
1565         }
1566
1567         cancel_delayed_work(&hdev->power_off);
1568
1569         hci_req_cancel(hdev, ENODEV);
1570         hci_req_lock(hdev);
1571
1572         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1573                 cancel_delayed_work_sync(&hdev->cmd_timer);
1574                 hci_req_unlock(hdev);
1575                 return 0;
1576         }
1577
1578         /* Flush RX and TX works */
1579         flush_work(&hdev->tx_work);
1580         flush_work(&hdev->rx_work);
1581
1582         if (hdev->discov_timeout > 0) {
1583                 cancel_delayed_work(&hdev->discov_off);
1584                 hdev->discov_timeout = 0;
1585                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1586                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1587         }
1588
1589         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1590                 cancel_delayed_work(&hdev->service_cache);
1591
1592         cancel_delayed_work_sync(&hdev->le_scan_disable);
1593         cancel_delayed_work_sync(&hdev->le_scan_restart);
1594
1595         if (hci_dev_test_flag(hdev, HCI_MGMT))
1596                 cancel_delayed_work_sync(&hdev->rpa_expired);
1597
1598         /* Avoid potential lockdep warnings from the *_flush() calls by
1599          * ensuring the workqueue is empty up front.
1600          */
1601         drain_workqueue(hdev->workqueue);
1602
1603         hci_dev_lock(hdev);
1604
1605         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1606
1607         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1608                 if (hdev->dev_type == HCI_BREDR)
1609                         mgmt_powered(hdev, 0);
1610         }
1611
1612         hci_inquiry_cache_flush(hdev);
1613         hci_pend_le_actions_clear(hdev);
1614         hci_conn_hash_flush(hdev);
1615         hci_dev_unlock(hdev);
1616
1617         smp_unregister(hdev);
1618
1619         hci_notify(hdev, HCI_DEV_DOWN);
1620
1621         if (hdev->flush)
1622                 hdev->flush(hdev);
1623
1624         /* Reset device */
1625         skb_queue_purge(&hdev->cmd_q);
1626         atomic_set(&hdev->cmd_cnt, 1);
1627         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1628             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1629             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1630                 set_bit(HCI_INIT, &hdev->flags);
1631                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1632                 clear_bit(HCI_INIT, &hdev->flags);
1633         }
1634
1635         /* Flush cmd work */
1636         flush_work(&hdev->cmd_work);
1637
1638         /* Drop queues */
1639         skb_queue_purge(&hdev->rx_q);
1640         skb_queue_purge(&hdev->cmd_q);
1641         skb_queue_purge(&hdev->raw_q);
1642
1643         /* Drop last sent command */
1644         if (hdev->sent_cmd) {
1645                 cancel_delayed_work_sync(&hdev->cmd_timer);
1646                 kfree_skb(hdev->sent_cmd);
1647                 hdev->sent_cmd = NULL;
1648         }
1649
1650         /* After this point our queues are empty
1651          * and no tasks are scheduled. */
1652         hdev->close(hdev);
1653
1654         /* Clear flags */
1655         hdev->flags &= BIT(HCI_RAW);
1656         hci_dev_clear_volatile_flags(hdev);
1657
1658         /* Controller radio is available but is currently powered down */
1659         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1660
1661         memset(hdev->eir, 0, sizeof(hdev->eir));
1662         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1663         bacpy(&hdev->random_addr, BDADDR_ANY);
1664
1665         hci_req_unlock(hdev);
1666
1667         hci_dev_put(hdev);
1668         return 0;
1669 }
1670
1671 int hci_dev_close(__u16 dev)
1672 {
1673         struct hci_dev *hdev;
1674         int err;
1675
1676         hdev = hci_dev_get(dev);
1677         if (!hdev)
1678                 return -ENODEV;
1679
1680         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1681                 err = -EBUSY;
1682                 goto done;
1683         }
1684
1685         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1686                 cancel_delayed_work(&hdev->power_off);
1687
1688         err = hci_dev_do_close(hdev);
1689
1690 done:
1691         hci_dev_put(hdev);
1692         return err;
1693 }
1694
1695 static int hci_dev_do_reset(struct hci_dev *hdev)
1696 {
1697         int ret;
1698
1699         BT_DBG("%s %p", hdev->name, hdev);
1700
1701         hci_req_lock(hdev);
1702
1703         /* Drop queues */
1704         skb_queue_purge(&hdev->rx_q);
1705         skb_queue_purge(&hdev->cmd_q);
1706
1707         /* Avoid potential lockdep warnings from the *_flush() calls by
1708          * ensuring the workqueue is empty up front.
1709          */
1710         drain_workqueue(hdev->workqueue);
1711
1712         hci_dev_lock(hdev);
1713         hci_inquiry_cache_flush(hdev);
1714         hci_conn_hash_flush(hdev);
1715         hci_dev_unlock(hdev);
1716
1717         if (hdev->flush)
1718                 hdev->flush(hdev);
1719
1720         atomic_set(&hdev->cmd_cnt, 1);
1721         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1722
1723         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1724
1725         hci_req_unlock(hdev);
1726         return ret;
1727 }
1728
1729 int hci_dev_reset(__u16 dev)
1730 {
1731         struct hci_dev *hdev;
1732         int err;
1733
1734         hdev = hci_dev_get(dev);
1735         if (!hdev)
1736                 return -ENODEV;
1737
1738         if (!test_bit(HCI_UP, &hdev->flags)) {
1739                 err = -ENETDOWN;
1740                 goto done;
1741         }
1742
1743         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1744                 err = -EBUSY;
1745                 goto done;
1746         }
1747
1748         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1749                 err = -EOPNOTSUPP;
1750                 goto done;
1751         }
1752
1753         err = hci_dev_do_reset(hdev);
1754
1755 done:
1756         hci_dev_put(hdev);
1757         return err;
1758 }
1759
1760 int hci_dev_reset_stat(__u16 dev)
1761 {
1762         struct hci_dev *hdev;
1763         int ret = 0;
1764
1765         hdev = hci_dev_get(dev);
1766         if (!hdev)
1767                 return -ENODEV;
1768
1769         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1770                 ret = -EBUSY;
1771                 goto done;
1772         }
1773
1774         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1775                 ret = -EOPNOTSUPP;
1776                 goto done;
1777         }
1778
1779         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1780
1781 done:
1782         hci_dev_put(hdev);
1783         return ret;
1784 }
1785
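/* Keep the mgmt settings in sync with a legacy HCISETSCAN change:
 * SCAN_PAGE maps to the HCI_CONNECTABLE flag and SCAN_INQUIRY to
 * HCI_DISCOVERABLE. If the management interface is in use, any
 * change is announced to userspace via mgmt_new_settings().
 */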
1786 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1787 {
1788         bool conn_changed, discov_changed;
1789
1790         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1791
1792         if ((scan & SCAN_PAGE))
1793                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1794                                                           HCI_CONNECTABLE);
1795         else
1796                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1797                                                            HCI_CONNECTABLE);
1798
1799         if ((scan & SCAN_INQUIRY)) {
1800                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1801                                                             HCI_DISCOVERABLE);
1802         } else {
1803                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1804                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1805                                                              HCI_DISCOVERABLE);
1806         }
1807
1808         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1809                 return;
1810
1811         if (conn_changed || discov_changed) {
1812                 /* In case this was disabled through mgmt */
1813                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1814
1815                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1816                         mgmt_update_adv_data(hdev);
1817
1818                 mgmt_new_settings(hdev);
1819         }
1820 }
1821
1822 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1823 {
1824         struct hci_dev *hdev;
1825         struct hci_dev_req dr;
1826         int err = 0;
1827
1828         if (copy_from_user(&dr, arg, sizeof(dr)))
1829                 return -EFAULT;
1830
1831         hdev = hci_dev_get(dr.dev_id);
1832         if (!hdev)
1833                 return -ENODEV;
1834
1835         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1836                 err = -EBUSY;
1837                 goto done;
1838         }
1839
1840         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1841                 err = -EOPNOTSUPP;
1842                 goto done;
1843         }
1844
1845         if (hdev->dev_type != HCI_BREDR) {
1846                 err = -EOPNOTSUPP;
1847                 goto done;
1848         }
1849
1850         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1851                 err = -EOPNOTSUPP;
1852                 goto done;
1853         }
1854
1855         switch (cmd) {
1856         case HCISETAUTH:
1857                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1858                                    HCI_INIT_TIMEOUT);
1859                 break;
1860
1861         case HCISETENCRYPT:
1862                 if (!lmp_encrypt_capable(hdev)) {
1863                         err = -EOPNOTSUPP;
1864                         break;
1865                 }
1866
1867                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1868                         /* Auth must be enabled first */
1869                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1870                                            HCI_INIT_TIMEOUT);
1871                         if (err)
1872                                 break;
1873                 }
1874
1875                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1876                                    HCI_INIT_TIMEOUT);
1877                 break;
1878
1879         case HCISETSCAN:
1880                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1881                                    HCI_INIT_TIMEOUT);
1882
1883                 /* Ensure that the connectable and discoverable states
1884                  * get correctly modified as this was a non-mgmt change.
1885                  */
1886                 if (!err)
1887                         hci_update_scan_state(hdev, dr.dev_opt);
1888                 break;
1889
1890         case HCISETLINKPOL:
1891                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1892                                    HCI_INIT_TIMEOUT);
1893                 break;
1894
1895         case HCISETLINKMODE:
1896                 hdev->link_mode = ((__u16) dr.dev_opt) &
1897                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1898                 break;
1899
1900         case HCISETPTYPE:
1901                 hdev->pkt_type = (__u16) dr.dev_opt;
1902                 break;
1903
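        /* The MTU ioctls pack two 16-bit values into the 32-bit
         * dev_opt field: on a little-endian system the low half holds
         * the packet count and the high half holds the MTU.
         */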
1904         case HCISETACLMTU:
1905                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1906                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1907                 break;
1908
1909         case HCISETSCOMTU:
1910                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1911                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1912                 break;
1913
1914         default:
1915                 err = -EINVAL;
1916                 break;
1917         }
1918
1919 done:
1920         hci_dev_put(hdev);
1921         return err;
1922 }
1923
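/* Copy the list of registered controllers to userspace. The caller
 * indicates how many hci_dev_req slots it allocated; that number is
 * capped at two pages worth of entries, and only the entries for
 * devices actually present are copied back.
 */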
1924 int hci_get_dev_list(void __user *arg)
1925 {
1926         struct hci_dev *hdev;
1927         struct hci_dev_list_req *dl;
1928         struct hci_dev_req *dr;
1929         int n = 0, size, err;
1930         __u16 dev_num;
1931
1932         if (get_user(dev_num, (__u16 __user *) arg))
1933                 return -EFAULT;
1934
1935         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1936                 return -EINVAL;
1937
1938         size = sizeof(*dl) + dev_num * sizeof(*dr);
1939
1940         dl = kzalloc(size, GFP_KERNEL);
1941         if (!dl)
1942                 return -ENOMEM;
1943
1944         dr = dl->dev_req;
1945
1946         read_lock(&hci_dev_list_lock);
1947         list_for_each_entry(hdev, &hci_dev_list, list) {
1948                 unsigned long flags = hdev->flags;
1949
1950                 /* When the auto-off is configured it means the transport
1951                  * is running, but in that case still indicate that the
1952                  * device is actually down.
1953                  */
1954                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1955                         flags &= ~BIT(HCI_UP);
1956
1957                 (dr + n)->dev_id  = hdev->id;
1958                 (dr + n)->dev_opt = flags;
1959
1960                 if (++n >= dev_num)
1961                         break;
1962         }
1963         read_unlock(&hci_dev_list_lock);
1964
1965         dl->dev_num = n;
1966         size = sizeof(*dl) + n * sizeof(*dr);
1967
1968         err = copy_to_user(arg, dl, size);
1969         kfree(dl);
1970
1971         return err ? -EFAULT : 0;
1972 }
1973
1974 int hci_get_dev_info(void __user *arg)
1975 {
1976         struct hci_dev *hdev;
1977         struct hci_dev_info di;
1978         unsigned long flags;
1979         int err = 0;
1980
1981         if (copy_from_user(&di, arg, sizeof(di)))
1982                 return -EFAULT;
1983
1984         hdev = hci_dev_get(di.dev_id);
1985         if (!hdev)
1986                 return -ENODEV;
1987
1988         /* When the auto-off is configured it means the transport
1989          * is running, but in that case still indicate that the
1990          * device is actually down.
1991          */
1992         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1993                 flags = hdev->flags & ~BIT(HCI_UP);
1994         else
1995                 flags = hdev->flags;
1996
1997         strcpy(di.name, hdev->name);
1998         di.bdaddr   = hdev->bdaddr;
1999         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2000         di.flags    = flags;
2001         di.pkt_type = hdev->pkt_type;
2002         if (lmp_bredr_capable(hdev)) {
2003                 di.acl_mtu  = hdev->acl_mtu;
2004                 di.acl_pkts = hdev->acl_pkts;
2005                 di.sco_mtu  = hdev->sco_mtu;
2006                 di.sco_pkts = hdev->sco_pkts;
2007         } else {
2008                 di.acl_mtu  = hdev->le_mtu;
2009                 di.acl_pkts = hdev->le_pkts;
2010                 di.sco_mtu  = 0;
2011                 di.sco_pkts = 0;
2012         }
2013         di.link_policy = hdev->link_policy;
2014         di.link_mode   = hdev->link_mode;
2015
2016         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2017         memcpy(&di.features, &hdev->features, sizeof(di.features));
2018
2019         if (copy_to_user(arg, &di, sizeof(di)))
2020                 err = -EFAULT;
2021
2022         hci_dev_put(hdev);
2023
2024         return err;
2025 }
2026
2027 /* ---- Interface to HCI drivers ---- */
2028
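/* rfkill callback: a block request powers the controller down right
 * away, unless setup or configuration is still in progress, in which
 * case only HCI_RFKILLED is recorded and hci_power_on acts on it once
 * setup completes.
 */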
2029 static int hci_rfkill_set_block(void *data, bool blocked)
2030 {
2031         struct hci_dev *hdev = data;
2032
2033         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2034
2035         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2036                 return -EBUSY;
2037
2038         if (blocked) {
2039                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2040                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2041                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2042                         hci_dev_do_close(hdev);
2043         } else {
2044                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2045         }
2046
2047         return 0;
2048 }
2049
2050 static const struct rfkill_ops hci_rfkill_ops = {
2051         .set_block = hci_rfkill_set_block,
2052 };
2053
2054 static void hci_power_on(struct work_struct *work)
2055 {
2056         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2057         int err;
2058
2059         BT_DBG("%s", hdev->name);
2060
2061         err = hci_dev_do_open(hdev);
2062         if (err < 0) {
2063                 hci_dev_lock(hdev);
2064                 mgmt_set_powered_failed(hdev, err);
2065                 hci_dev_unlock(hdev);
2066                 return;
2067         }
2068
2069         /* During the HCI setup phase, a few error conditions are
2070          * ignored and they need to be checked now. If they are still
2071          * valid, it is important to turn the device back off.
2072          */
2073         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2074             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2075             (hdev->dev_type == HCI_BREDR &&
2076              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2077              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2078                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2079                 hci_dev_do_close(hdev);
2080         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2081                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2082                                    HCI_AUTO_OFF_TIMEOUT);
2083         }
2084
2085         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2086                 /* For unconfigured devices, set the HCI_RAW flag
2087                  * so that userspace can easily identify them.
2088                  */
2089                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2090                         set_bit(HCI_RAW, &hdev->flags);
2091
2092                 /* For fully configured devices, this will send
2093                  * the Index Added event. For unconfigured devices,
2094          * it will send the Unconfigured Index Added event.
2095                  *
2096                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2097          * and no event will be sent.
2098                  */
2099                 mgmt_index_added(hdev);
2100         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2101                 /* Once the controller is configured, it
2102                  * is important to clear the HCI_RAW flag.
2103                  */
2104                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2105                         clear_bit(HCI_RAW, &hdev->flags);
2106
2107                 /* Powering on the controller with HCI_CONFIG set only
2108                  * happens with the transition from unconfigured to
2109                  * configured. This will send the Index Added event.
2110                  */
2111                 mgmt_index_added(hdev);
2112         }
2113 }
2114
2115 static void hci_power_off(struct work_struct *work)
2116 {
2117         struct hci_dev *hdev = container_of(work, struct hci_dev,
2118                                             power_off.work);
2119
2120         BT_DBG("%s", hdev->name);
2121
2122         hci_dev_do_close(hdev);
2123 }
2124
2125 static void hci_error_reset(struct work_struct *work)
2126 {
2127         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2128
2129         BT_DBG("%s", hdev->name);
2130
2131         if (hdev->hw_error)
2132                 hdev->hw_error(hdev, hdev->hw_error_code);
2133         else
2134                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2135                        hdev->hw_error_code);
2136
2137         if (hci_dev_do_close(hdev))
2138                 return;
2139
2140         hci_dev_do_open(hdev);
2141 }
2142
2143 static void hci_discov_off(struct work_struct *work)
2144 {
2145         struct hci_dev *hdev;
2146
2147         hdev = container_of(work, struct hci_dev, discov_off.work);
2148
2149         BT_DBG("%s", hdev->name);
2150
2151         mgmt_discoverable_timeout(hdev);
2152 }
2153
2154 void hci_uuids_clear(struct hci_dev *hdev)
2155 {
2156         struct bt_uuid *uuid, *tmp;
2157
2158         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2159                 list_del(&uuid->list);
2160                 kfree(uuid);
2161         }
2162 }
2163
2164 void hci_link_keys_clear(struct hci_dev *hdev)
2165 {
2166         struct link_key *key;
2167
2168         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2169                 list_del_rcu(&key->list);
2170                 kfree_rcu(key, rcu);
2171         }
2172 }
2173
2174 void hci_smp_ltks_clear(struct hci_dev *hdev)
2175 {
2176         struct smp_ltk *k;
2177
2178         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2179                 list_del_rcu(&k->list);
2180                 kfree_rcu(k, rcu);
2181         }
2182 }
2183
2184 void hci_smp_irks_clear(struct hci_dev *hdev)
2185 {
2186         struct smp_irk *k;
2187
2188         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2189                 list_del_rcu(&k->list);
2190                 kfree_rcu(k, rcu);
2191         }
2192 }
2193
2194 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2195 {
2196         struct link_key *k;
2197
2198         rcu_read_lock();
2199         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2200                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2201                         rcu_read_unlock();
2202                         return k;
2203                 }
2204         }
2205         rcu_read_unlock();
2206
2207         return NULL;
2208 }
2209
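/* Decide whether a new link key should be stored persistently.
 * In short: legacy (pre-SSP) keys are always stored, debug keys never
 * are, and for SSP keys the decision depends on the authentication
 * requirements both sides used during pairing.
 */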
2210 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2211                                u8 key_type, u8 old_key_type)
2212 {
2213         /* Legacy key */
2214         if (key_type < 0x03)
2215                 return true;
2216
2217         /* Debug keys are insecure so don't store them persistently */
2218         if (key_type == HCI_LK_DEBUG_COMBINATION)
2219                 return false;
2220
2221         /* Changed combination key and there's no previous one */
2222         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2223                 return false;
2224
2225         /* Security mode 3 case */
2226         if (!conn)
2227                 return true;
2228
2229         /* BR/EDR key derived using SC from an LE link */
2230         if (conn->type == LE_LINK)
2231                 return true;
2232
2233         /* Neither local nor remote side had no-bonding as a requirement */
2234         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2235                 return true;
2236
2237         /* Local side had dedicated bonding as requirement */
2238         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2239                 return true;
2240
2241         /* Remote side had dedicated bonding as requirement */
2242         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2243                 return true;
2244
2245         /* If none of the above criteria match, then don't store the key
2246          * persistently */
2247         return false;
2248 }
2249
2250 static u8 ltk_role(u8 type)
2251 {
2252         if (type == SMP_LTK)
2253                 return HCI_ROLE_MASTER;
2254
2255         return HCI_ROLE_SLAVE;
2256 }
2257
2258 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2259                              u8 addr_type, u8 role)
2260 {
2261         struct smp_ltk *k;
2262
2263         rcu_read_lock();
2264         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2265                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2266                         continue;
2267
2268                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2269                         rcu_read_unlock();
2270                         return k;
2271                 }
2272         }
2273         rcu_read_unlock();
2274
2275         return NULL;
2276 }
2277
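/* Resolve a Resolvable Private Address to its IRK. The cached rpa of
 * each IRK is compared first; only when no cached entry matches is the
 * cryptographic check (smp_irk_matches) run, and a successful match
 * updates the cache for subsequent lookups.
 */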
2278 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2279 {
2280         struct smp_irk *irk;
2281
2282         rcu_read_lock();
2283         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2284                 if (!bacmp(&irk->rpa, rpa)) {
2285                         rcu_read_unlock();
2286                         return irk;
2287                 }
2288         }
2289
2290         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2291                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2292                         bacpy(&irk->rpa, rpa);
2293                         rcu_read_unlock();
2294                         return irk;
2295                 }
2296         }
2297         rcu_read_unlock();
2298
2299         return NULL;
2300 }
2301
2302 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2303                                      u8 addr_type)
2304 {
2305         struct smp_irk *irk;
2306
2307         /* Identity Address must be public or static random */
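        /* A static random address is identified by its two most
         * significant bits being set to 1; bdaddr_t is stored in
         * little-endian order, so those bits are tested in b[5].
         */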
2308         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2309                 return NULL;
2310
2311         rcu_read_lock();
2312         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2313                 if (addr_type == irk->addr_type &&
2314                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2315                         rcu_read_unlock();
2316                         return irk;
2317                 }
2318         }
2319         rcu_read_unlock();
2320
2321         return NULL;
2322 }
2323
2324 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2325                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2326                                   u8 pin_len, bool *persistent)
2327 {
2328         struct link_key *key, *old_key;
2329         u8 old_key_type;
2330
2331         old_key = hci_find_link_key(hdev, bdaddr);
2332         if (old_key) {
2333                 old_key_type = old_key->type;
2334                 key = old_key;
2335         } else {
2336                 old_key_type = conn ? conn->key_type : 0xff;
2337                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2338                 if (!key)
2339                         return NULL;
2340                 list_add_rcu(&key->list, &hdev->link_keys);
2341         }
2342
2343         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2344
2345         /* Some buggy controller combinations generate a changed
2346          * combination key for legacy pairing even when there's no
2347          * previous key */
2348         if (type == HCI_LK_CHANGED_COMBINATION &&
2349             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2350                 type = HCI_LK_COMBINATION;
2351                 if (conn)
2352                         conn->key_type = type;
2353         }
2354
2355         bacpy(&key->bdaddr, bdaddr);
2356         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2357         key->pin_len = pin_len;
2358
2359         if (type == HCI_LK_CHANGED_COMBINATION)
2360                 key->type = old_key_type;
2361         else
2362                 key->type = type;
2363
2364         if (persistent)
2365                 *persistent = hci_persistent_key(hdev, conn, type,
2366                                                  old_key_type);
2367
2368         return key;
2369 }
2370
2371 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2372                             u8 addr_type, u8 type, u8 authenticated,
2373                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2374 {
2375         struct smp_ltk *key, *old_key;
2376         u8 role = ltk_role(type);
2377
2378         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2379         if (old_key)
2380                 key = old_key;
2381         else {
2382                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2383                 if (!key)
2384                         return NULL;
2385                 list_add_rcu(&key->list, &hdev->long_term_keys);
2386         }
2387
2388         bacpy(&key->bdaddr, bdaddr);
2389         key->bdaddr_type = addr_type;
2390         memcpy(key->val, tk, sizeof(key->val));
2391         key->authenticated = authenticated;
2392         key->ediv = ediv;
2393         key->rand = rand;
2394         key->enc_size = enc_size;
2395         key->type = type;
2396
2397         return key;
2398 }
2399
2400 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2402 {
2403         struct smp_irk *irk;
2404
2405         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2406         if (!irk) {
2407                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2408                 if (!irk)
2409                         return NULL;
2410
2411                 bacpy(&irk->bdaddr, bdaddr);
2412                 irk->addr_type = addr_type;
2413
2414                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2415         }
2416
2417         memcpy(irk->val, val, 16);
2418         bacpy(&irk->rpa, rpa);
2419
2420         return irk;
2421 }
2422
2423 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2424 {
2425         struct link_key *key;
2426
2427         key = hci_find_link_key(hdev, bdaddr);
2428         if (!key)
2429                 return -ENOENT;
2430
2431         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2432
2433         list_del_rcu(&key->list);
2434         kfree_rcu(key, rcu);
2435
2436         return 0;
2437 }
2438
2439 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2440 {
2441         struct smp_ltk *k;
2442         int removed = 0;
2443
2444         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2445                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2446                         continue;
2447
2448                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2449
2450                 list_del_rcu(&k->list);
2451                 kfree_rcu(k, rcu);
2452                 removed++;
2453         }
2454
2455         return removed ? 0 : -ENOENT;
2456 }
2457
2458 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2459 {
2460         struct smp_irk *k;
2461
2462         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2463                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2464                         continue;
2465
2466                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2467
2468                 list_del_rcu(&k->list);
2469                 kfree_rcu(k, rcu);
2470         }
2471 }
2472
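/* Check whether a bond exists with the given address: a stored link
 * key for BR/EDR, or a matching long term key for LE. For LE the
 * address is first mapped to its identity address through any
 * matching IRK before the LTK list is searched.
 */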
2473 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2474 {
2475         struct smp_ltk *k;
2476         struct smp_irk *irk;
2477         u8 addr_type;
2478
2479         if (type == BDADDR_BREDR) {
2480                 if (hci_find_link_key(hdev, bdaddr))
2481                         return true;
2482                 return false;
2483         }
2484
2485         /* Convert to HCI addr type which struct smp_ltk uses */
2486         if (type == BDADDR_LE_PUBLIC)
2487                 addr_type = ADDR_LE_DEV_PUBLIC;
2488         else
2489                 addr_type = ADDR_LE_DEV_RANDOM;
2490
2491         irk = hci_get_irk(hdev, bdaddr, addr_type);
2492         if (irk) {
2493                 bdaddr = &irk->bdaddr;
2494                 addr_type = irk->addr_type;
2495         }
2496
2497         rcu_read_lock();
2498         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2499                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2500                         rcu_read_unlock();
2501                         return true;
2502                 }
2503         }
2504         rcu_read_unlock();
2505
2506         return false;
2507 }
2508
2509 /* HCI command timer function */
2510 static void hci_cmd_timeout(struct work_struct *work)
2511 {
2512         struct hci_dev *hdev = container_of(work, struct hci_dev,
2513                                             cmd_timer.work);
2514
2515         if (hdev->sent_cmd) {
2516                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2517                 u16 opcode = __le16_to_cpu(sent->opcode);
2518
2519                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2520         } else {
2521                 BT_ERR("%s command tx timeout", hdev->name);
2522         }
2523
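        /* Give back one command credit and kick cmd_work so the next
         * queued command can still be sent despite the missing
         * completion event.
         */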
2524         atomic_set(&hdev->cmd_cnt, 1);
2525         queue_work(hdev->workqueue, &hdev->cmd_work);
2526 }
2527
2528 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2529                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2530 {
2531         struct oob_data *data;
2532
2533         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2534                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2535                         continue;
2536                 if (data->bdaddr_type != bdaddr_type)
2537                         continue;
2538                 return data;
2539         }
2540
2541         return NULL;
2542 }
2543
2544 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2545                                u8 bdaddr_type)
2546 {
2547         struct oob_data *data;
2548
2549         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2550         if (!data)
2551                 return -ENOENT;
2552
2553         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2554
2555         list_del(&data->list);
2556         kfree(data);
2557
2558         return 0;
2559 }
2560
2561 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2562 {
2563         struct oob_data *data, *n;
2564
2565         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2566                 list_del(&data->list);
2567                 kfree(data);
2568         }
2569 }
2570
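/* Store remote OOB pairing data. The present field ends up as a
 * bitmask of which value pairs were supplied: 0x01 for P-192 only,
 * 0x02 for P-256 only and 0x03 for both.
 */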
2571 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2572                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2573                             u8 *hash256, u8 *rand256)
2574 {
2575         struct oob_data *data;
2576
2577         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2578         if (!data) {
2579                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2580                 if (!data)
2581                         return -ENOMEM;
2582
2583                 bacpy(&data->bdaddr, bdaddr);
2584                 data->bdaddr_type = bdaddr_type;
2585                 list_add(&data->list, &hdev->remote_oob_data);
2586         }
2587
2588         if (hash192 && rand192) {
2589                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2590                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2591                 if (hash256 && rand256)
2592                         data->present = 0x03;
2593         } else {
2594                 memset(data->hash192, 0, sizeof(data->hash192));
2595                 memset(data->rand192, 0, sizeof(data->rand192));
2596                 if (hash256 && rand256)
2597                         data->present = 0x02;
2598                 else
2599                         data->present = 0x00;
2600         }
2601
2602         if (hash256 && rand256) {
2603                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2604                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2605         } else {
2606                 memset(data->hash256, 0, sizeof(data->hash256));
2607                 memset(data->rand256, 0, sizeof(data->rand256));
2608                 if (hash192 && rand192)
2609                         data->present = 0x01;
2610         }
2611
2612         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2613
2614         return 0;
2615 }
2616
2617 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2618                                          bdaddr_t *bdaddr, u8 type)
2619 {
2620         struct bdaddr_list *b;
2621
2622         list_for_each_entry(b, bdaddr_list, list) {
2623                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2624                         return b;
2625         }
2626
2627         return NULL;
2628 }
2629
2630 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2631 {
2632         struct list_head *p, *n;
2633
2634         list_for_each_safe(p, n, bdaddr_list) {
2635                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2636
2637                 list_del(p);
2638                 kfree(b);
2639         }
2640 }
2641
2642 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2643 {
2644         struct bdaddr_list *entry;
2645
2646         if (!bacmp(bdaddr, BDADDR_ANY))
2647                 return -EBADF;
2648
2649         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2650                 return -EEXIST;
2651
2652         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2653         if (!entry)
2654                 return -ENOMEM;
2655
2656         bacpy(&entry->bdaddr, bdaddr);
2657         entry->bdaddr_type = type;
2658
2659         list_add(&entry->list, list);
2660
2661         return 0;
2662 }
2663
2664 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2665 {
2666         struct bdaddr_list *entry;
2667
2668         if (!bacmp(bdaddr, BDADDR_ANY)) {
2669                 hci_bdaddr_list_clear(list);
2670                 return 0;
2671         }
2672
2673         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2674         if (!entry)
2675                 return -ENOENT;
2676
2677         list_del(&entry->list);
2678         kfree(entry);
2679
2680         return 0;
2681 }
2682
2683 /* This function requires the caller holds hdev->lock */
2684 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2685                                                bdaddr_t *addr, u8 addr_type)
2686 {
2687         struct hci_conn_params *params;
2688
2689         /* The conn params list only contains identity addresses */
2690         if (!hci_is_identity_address(addr, addr_type))
2691                 return NULL;
2692
2693         list_for_each_entry(params, &hdev->le_conn_params, list) {
2694                 if (bacmp(&params->addr, addr) == 0 &&
2695                     params->addr_type == addr_type) {
2696                         return params;
2697                 }
2698         }
2699
2700         return NULL;
2701 }
2702
2703 /* This function requires the caller holds hdev->lock */
2704 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2705                                                   bdaddr_t *addr, u8 addr_type)
2706 {
2707         struct hci_conn_params *param;
2708
2709         /* The list only contains identity addresses */
2710         if (!hci_is_identity_address(addr, addr_type))
2711                 return NULL;
2712
2713         list_for_each_entry(param, list, action) {
2714                 if (bacmp(&param->addr, addr) == 0 &&
2715                     param->addr_type == addr_type)
2716                         return param;
2717         }
2718
2719         return NULL;
2720 }
2721
2722 /* This function requires the caller holds hdev->lock */
2723 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2724                                             bdaddr_t *addr, u8 addr_type)
2725 {
2726         struct hci_conn_params *params;
2727
2728         if (!hci_is_identity_address(addr, addr_type))
2729                 return NULL;
2730
2731         params = hci_conn_params_lookup(hdev, addr, addr_type);
2732         if (params)
2733                 return params;
2734
2735         params = kzalloc(sizeof(*params), GFP_KERNEL);
2736         if (!params) {
2737                 BT_ERR("Out of memory");
2738                 return NULL;
2739         }
2740
2741         bacpy(&params->addr, addr);
2742         params->addr_type = addr_type;
2743
2744         list_add(&params->list, &hdev->le_conn_params);
2745         INIT_LIST_HEAD(&params->action);
2746
2747         params->conn_min_interval = hdev->le_conn_min_interval;
2748         params->conn_max_interval = hdev->le_conn_max_interval;
2749         params->conn_latency = hdev->le_conn_latency;
2750         params->supervision_timeout = hdev->le_supv_timeout;
2751         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2752
2753         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2754
2755         return params;
2756 }
2757
2758 static void hci_conn_params_free(struct hci_conn_params *params)
2759 {
2760         if (params->conn) {
2761                 hci_conn_drop(params->conn);
2762                 hci_conn_put(params->conn);
2763         }
2764
2765         list_del(&params->action);
2766         list_del(&params->list);
2767         kfree(params);
2768 }
2769
2770 /* This function requires the caller holds hdev->lock */
2771 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2772 {
2773         struct hci_conn_params *params;
2774
2775         params = hci_conn_params_lookup(hdev, addr, addr_type);
2776         if (!params)
2777                 return;
2778
2779         hci_conn_params_free(params);
2780
2781         hci_update_background_scan(hdev);
2782
2783         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2784 }
2785
2786 /* This function requires the caller holds hdev->lock */
2787 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2788 {
2789         struct hci_conn_params *params, *tmp;
2790
2791         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2792                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2793                         continue;
2794                 list_del(&params->list);
2795                 kfree(params);
2796         }
2797
2798         BT_DBG("All disabled LE connection parameters were removed");
2799 }
2800
2801 /* This function requires the caller holds hdev->lock */
2802 void hci_conn_params_clear_all(struct hci_dev *hdev)
2803 {
2804         struct hci_conn_params *params, *tmp;
2805
2806         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2807                 hci_conn_params_free(params);
2808
2809         hci_update_background_scan(hdev);
2810
2811         BT_DBG("All LE connection parameters were removed");
2812 }
2813
2814 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2815 {
2816         if (status) {
2817                 BT_ERR("Failed to start inquiry: status %d", status);
2818
2819                 hci_dev_lock(hdev);
2820                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2821                 hci_dev_unlock(hdev);
2822                 return;
2823         }
2824 }
2825
2826 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2827                                           u16 opcode)
2828 {
2829         /* General inquiry access code (GIAC) */
2830         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2831         struct hci_cp_inquiry cp;
2832         int err;
2833
2834         if (status) {
2835                 BT_ERR("Failed to disable LE scanning: status %d", status);
2836                 return;
2837         }
2838
2839         hdev->discovery.scan_start = 0;
2840
2841         switch (hdev->discovery.type) {
2842         case DISCOV_TYPE_LE:
2843                 hci_dev_lock(hdev);
2844                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2845                 hci_dev_unlock(hdev);
2846                 break;
2847
2848         case DISCOV_TYPE_INTERLEAVED:
2849                 hci_dev_lock(hdev);
2850
2851                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2852                              &hdev->quirks)) {
2853                         /* If we were running an LE-only scan, change the
2854                          * discovery state. If we were running both LE and
2855                          * BR/EDR inquiry simultaneously and BR/EDR inquiry
2856                          * already finished, stop discovery; otherwise BR/EDR
2857                          * inquiry will stop discovery when it finishes. If a
2858                          * remote name is being resolved, keep the state as is.
2859                          */
2860                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2861                             hdev->discovery.state != DISCOVERY_RESOLVING)
2862                                 hci_discovery_set_state(hdev,
2863                                                         DISCOVERY_STOPPED);
2864                 } else {
2865                         struct hci_request req;
2866
2867                         hci_inquiry_cache_flush(hdev);
2868
2869                         hci_req_init(&req, hdev);
2870
2871                         memset(&cp, 0, sizeof(cp));
2872                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2873                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2874                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2875
2876                         err = hci_req_run(&req, inquiry_complete);
2877                         if (err) {
2878                                 BT_ERR("Inquiry request failed: err %d", err);
2879                                 hci_discovery_set_state(hdev,
2880                                                         DISCOVERY_STOPPED);
2881                         }
2882                 }
2883
2884                 hci_dev_unlock(hdev);
2885                 break;
2886         }
2887 }
2888
2889 static void le_scan_disable_work(struct work_struct *work)
2890 {
2891         struct hci_dev *hdev = container_of(work, struct hci_dev,
2892                                             le_scan_disable.work);
2893         struct hci_request req;
2894         int err;
2895
2896         BT_DBG("%s", hdev->name);
2897
2898         cancel_delayed_work_sync(&hdev->le_scan_restart);
2899
2900         hci_req_init(&req, hdev);
2901
2902         hci_req_add_le_scan_disable(&req);
2903
2904         err = hci_req_run(&req, le_scan_disable_work_complete);
2905         if (err)
2906                 BT_ERR("Disable LE scanning request failed: err %d", err);
2907 }
2908
2909 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2910                                           u16 opcode)
2911 {
2912         unsigned long timeout, duration, scan_start, now;
2913
2914         BT_DBG("%s", hdev->name);
2915
2916         if (status) {
2917                 BT_ERR("Failed to restart LE scan: status %d", status);
2918                 return;
2919         }
2920
2921         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2922             !hdev->discovery.scan_start)
2923                 return;
2924
2925         /* When the scan was started, the le_scan_disable work was queued
2926          * to run 'duration' after scan_start. During the scan restart that
2927          * work was canceled, so queue it again with the remaining timeout
2928          * to make sure the scan does not run indefinitely.
2929          */
2930         duration = hdev->discovery.scan_duration;
2931         scan_start = hdev->discovery.scan_start;
2932         now = jiffies;
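        /* jiffies may wrap between scan_start and now; the unsigned
         * subtraction in the test below handles that, and the inner
         * else branch recomputes the elapsed time after a wrap.
         */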
2933         if (now - scan_start <= duration) {
2934                 int elapsed;
2935
2936                 if (now >= scan_start)
2937                         elapsed = now - scan_start;
2938                 else
2939                         elapsed = ULONG_MAX - scan_start + now;
2940
2941                 timeout = duration - elapsed;
2942         } else {
2943                 timeout = 0;
2944         }
2945         queue_delayed_work(hdev->workqueue,
2946                            &hdev->le_scan_disable, timeout);
2947 }
2948
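/* Restart an ongoing LE scan by disabling and re-enabling it. This is
 * needed on controllers with a strict duplicate filter, where a
 * periodic restart lets already-reported devices be seen again during
 * a long-running discovery.
 */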
2949 static void le_scan_restart_work(struct work_struct *work)
2950 {
2951         struct hci_dev *hdev = container_of(work, struct hci_dev,
2952                                             le_scan_restart.work);
2953         struct hci_request req;
2954         struct hci_cp_le_set_scan_enable cp;
2955         int err;
2956
2957         BT_DBG("%s", hdev->name);
2958
2959         /* If controller is not scanning we are done. */
2960         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2961                 return;
2962
2963         hci_req_init(&req, hdev);
2964
2965         hci_req_add_le_scan_disable(&req);
2966
2967         memset(&cp, 0, sizeof(cp));
2968         cp.enable = LE_SCAN_ENABLE;
2969         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2970         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2971
2972         err = hci_req_run(&req, le_scan_restart_work_complete);
2973         if (err)
2974                 BT_ERR("Restart LE scan request failed: err %d", err);
2975 }
2976
2977 /* Copy the Identity Address of the controller.
2978  *
2979  * If the controller has a public BD_ADDR, then by default use that one.
2980  * If this is an LE-only controller without a public address, default to
2981  * the static random address.
2982  *
2983  * For debugging purposes it is possible to force controllers with a
2984  * public address to use the static random address instead.
2985  *
2986  * In case BR/EDR has been disabled on a dual-mode controller and
2987  * userspace has configured a static address, then that address
2988  * becomes the identity address instead of the public BR/EDR address.
2989  */
2990 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2991                                u8 *bdaddr_type)
2992 {
2993         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2994             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2995             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2996              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2997                 bacpy(bdaddr, &hdev->static_addr);
2998                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2999         } else {
3000                 bacpy(bdaddr, &hdev->bdaddr);
3001                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3002         }
3003 }
3004
3005 /* Alloc HCI device */
3006 struct hci_dev *hci_alloc_dev(void)
3007 {
3008         struct hci_dev *hdev;
3009
3010         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3011         if (!hdev)
3012                 return NULL;
3013
3014         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3015         hdev->esco_type = (ESCO_HV1);
3016         hdev->link_mode = (HCI_LM_ACCEPT);
3017         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3018         hdev->io_capability = 0x03;     /* No Input No Output */
3019         hdev->manufacturer = 0xffff;    /* Default to internal use */
3020         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3021         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3022
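        /* The timing defaults below use the standard HCI units: sniff
         * intervals and LE advertising/scan parameters are in 0.625 ms
         * slots (800 slots = 500 ms, 0x0800 = 1.28 s), connection
         * intervals in 1.25 ms units, the supervision timeout in 10 ms
         * units and the LE data length times in microseconds.
         */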
3023         hdev->sniff_max_interval = 800;
3024         hdev->sniff_min_interval = 80;
3025
3026         hdev->le_adv_channel_map = 0x07;
3027         hdev->le_adv_min_interval = 0x0800;
3028         hdev->le_adv_max_interval = 0x0800;
3029         hdev->le_scan_interval = 0x0060;
3030         hdev->le_scan_window = 0x0030;
3031         hdev->le_conn_min_interval = 0x0028;
3032         hdev->le_conn_max_interval = 0x0038;
3033         hdev->le_conn_latency = 0x0000;
3034         hdev->le_supv_timeout = 0x002a;
3035         hdev->le_def_tx_len = 0x001b;
3036         hdev->le_def_tx_time = 0x0148;
3037         hdev->le_max_tx_len = 0x001b;
3038         hdev->le_max_tx_time = 0x0148;
3039         hdev->le_max_rx_len = 0x001b;
3040         hdev->le_max_rx_time = 0x0148;
3041
3042         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3043         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3044         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3045         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3046
3047         mutex_init(&hdev->lock);
3048         mutex_init(&hdev->req_lock);
3049
3050         INIT_LIST_HEAD(&hdev->mgmt_pending);
3051         INIT_LIST_HEAD(&hdev->blacklist);
3052         INIT_LIST_HEAD(&hdev->whitelist);
3053         INIT_LIST_HEAD(&hdev->uuids);
3054         INIT_LIST_HEAD(&hdev->link_keys);
3055         INIT_LIST_HEAD(&hdev->long_term_keys);
3056         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3057         INIT_LIST_HEAD(&hdev->remote_oob_data);
3058         INIT_LIST_HEAD(&hdev->le_white_list);
3059         INIT_LIST_HEAD(&hdev->le_conn_params);
3060         INIT_LIST_HEAD(&hdev->pend_le_conns);
3061         INIT_LIST_HEAD(&hdev->pend_le_reports);
3062         INIT_LIST_HEAD(&hdev->conn_hash.list);
3063
3064         INIT_WORK(&hdev->rx_work, hci_rx_work);
3065         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3066         INIT_WORK(&hdev->tx_work, hci_tx_work);
3067         INIT_WORK(&hdev->power_on, hci_power_on);
3068         INIT_WORK(&hdev->error_reset, hci_error_reset);
3069
3070         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3071         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3072         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3073         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3074
3075         skb_queue_head_init(&hdev->rx_q);
3076         skb_queue_head_init(&hdev->cmd_q);
3077         skb_queue_head_init(&hdev->raw_q);
3078
3079         init_waitqueue_head(&hdev->req_wait_q);
3080
3081         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3082
3083         hci_init_sysfs(hdev);
3084         discovery_init(hdev);
3085         adv_info_init(hdev);
3086
3087         return hdev;
3088 }
3089 EXPORT_SYMBOL(hci_alloc_dev);
3090
3091 /* Free HCI device */
3092 void hci_free_dev(struct hci_dev *hdev)
3093 {
3094         /* will free via device release */
3095         put_device(&hdev->dev);
3096 }
3097 EXPORT_SYMBOL(hci_free_dev);
3098
3099 /* Register HCI device */
3100 int hci_register_dev(struct hci_dev *hdev)
3101 {
3102         int id, error;
3103
3104         if (!hdev->open || !hdev->close || !hdev->send)
3105                 return -EINVAL;
3106
3107         /* Do not allow HCI_AMP devices to register at index 0,
3108          * so the index can be used as the AMP controller ID.
3109          */
3110         switch (hdev->dev_type) {
3111         case HCI_BREDR:
3112                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3113                 break;
3114         case HCI_AMP:
3115                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3116                 break;
3117         default:
3118                 return -EINVAL;
3119         }
3120
3121         if (id < 0)
3122                 return id;
3123
3124         sprintf(hdev->name, "hci%d", id);
3125         hdev->id = id;
3126
3127         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3128
3129         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3130                                           WQ_MEM_RECLAIM, 1, hdev->name);
3131         if (!hdev->workqueue) {
3132                 error = -ENOMEM;
3133                 goto err;
3134         }
3135
3136         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3137                                               WQ_MEM_RECLAIM, 1, hdev->name);
3138         if (!hdev->req_workqueue) {
3139                 destroy_workqueue(hdev->workqueue);
3140                 error = -ENOMEM;
3141                 goto err;
3142         }
3143
3144         if (!IS_ERR_OR_NULL(bt_debugfs))
3145                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3146
3147         dev_set_name(&hdev->dev, "%s", hdev->name);
3148
3149         error = device_add(&hdev->dev);
3150         if (error < 0)
3151                 goto err_wqueue;
3152
3153         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3154                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3155                                     hdev);
3156         if (hdev->rfkill) {
3157                 if (rfkill_register(hdev->rfkill) < 0) {
3158                         rfkill_destroy(hdev->rfkill);
3159                         hdev->rfkill = NULL;
3160                 }
3161         }
3162
3163         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3164                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3165
3166         hci_dev_set_flag(hdev, HCI_SETUP);
3167         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3168
3169         if (hdev->dev_type == HCI_BREDR) {
3170                 /* Assume BR/EDR support until proven otherwise (such as
3171                  * through reading supported features during init).
3172                  */
3173                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3174         }
3175
3176         write_lock(&hci_dev_list_lock);
3177         list_add(&hdev->list, &hci_dev_list);
3178         write_unlock(&hci_dev_list_lock);
3179
3180         /* Devices that are marked for raw-only usage are unconfigured
3181          * and should not be included in normal operation.
3182          */
3183         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3184                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3185
3186         hci_notify(hdev, HCI_DEV_REG);
3187         hci_dev_hold(hdev);
3188
3189         queue_work(hdev->req_workqueue, &hdev->power_on);
3190
3191         return id;
3192
3193 err_wqueue:
3194         destroy_workqueue(hdev->workqueue);
3195         destroy_workqueue(hdev->req_workqueue);
3196 err:
3197         ida_simple_remove(&hci_index_ida, hdev->id);
3198
3199         return error;
3200 }
3201 EXPORT_SYMBOL(hci_register_dev);
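
/* A minimal driver-side sketch of the lifecycle above, assuming
 * hypothetical transport callbacks my_open/my_close/my_send (a real
 * driver also fills in bus type, vendor specifics, etc.):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() rejects an hdev missing any of the three mandatory
 * callbacks, as checked at the top of the function.
 */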
3202
3203 /* Unregister HCI device */
3204 void hci_unregister_dev(struct hci_dev *hdev)
3205 {
3206         int id;
3207
3208         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3209
3210         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3211
3212         id = hdev->id;
3213
3214         write_lock(&hci_dev_list_lock);
3215         list_del(&hdev->list);
3216         write_unlock(&hci_dev_list_lock);
3217
3218         hci_dev_do_close(hdev);
3219
3220         cancel_work_sync(&hdev->power_on);
3221
3222         if (!test_bit(HCI_INIT, &hdev->flags) &&
3223             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3224             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3225                 hci_dev_lock(hdev);
3226                 mgmt_index_removed(hdev);
3227                 hci_dev_unlock(hdev);
3228         }
3229
3230         /* mgmt_index_removed should take care of emptying the
3231          * pending list, so it must be empty at this point. */
3232         BUG_ON(!list_empty(&hdev->mgmt_pending));
3233
3234         hci_notify(hdev, HCI_DEV_UNREG);
3235
3236         if (hdev->rfkill) {
3237                 rfkill_unregister(hdev->rfkill);
3238                 rfkill_destroy(hdev->rfkill);
3239         }
3240
3241         device_del(&hdev->dev);
3242
3243         debugfs_remove_recursive(hdev->debugfs);
3244
3245         destroy_workqueue(hdev->workqueue);
3246         destroy_workqueue(hdev->req_workqueue);
3247
3248         hci_dev_lock(hdev);
3249         hci_bdaddr_list_clear(&hdev->blacklist);
3250         hci_bdaddr_list_clear(&hdev->whitelist);
3251         hci_uuids_clear(hdev);
3252         hci_link_keys_clear(hdev);
3253         hci_smp_ltks_clear(hdev);
3254         hci_smp_irks_clear(hdev);
3255         hci_remote_oob_data_clear(hdev);
3256         hci_bdaddr_list_clear(&hdev->le_white_list);
3257         hci_conn_params_clear_all(hdev);
3258         hci_discovery_filter_clear(hdev);
3259         hci_dev_unlock(hdev);
3260
3261         hci_dev_put(hdev);
3262
3263         ida_simple_remove(&hci_index_ida, id);
3264 }
3265 EXPORT_SYMBOL(hci_unregister_dev);
3266
3267 /* Suspend HCI device */
3268 int hci_suspend_dev(struct hci_dev *hdev)
3269 {
3270         hci_notify(hdev, HCI_DEV_SUSPEND);
3271         return 0;
3272 }
3273 EXPORT_SYMBOL(hci_suspend_dev);
3274
3275 /* Resume HCI device */
3276 int hci_resume_dev(struct hci_dev *hdev)
3277 {
3278         hci_notify(hdev, HCI_DEV_RESUME);
3279         return 0;
3280 }
3281 EXPORT_SYMBOL(hci_resume_dev);
3282
3283 /* Reset HCI device */
3284 int hci_reset_dev(struct hci_dev *hdev)
3285 {
3286         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3287         struct sk_buff *skb;
3288
3289         skb = bt_skb_alloc(3, GFP_ATOMIC);
3290         if (!skb)
3291                 return -ENOMEM;
3292
3293         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3294         memcpy(skb_put(skb, 3), hw_err, 3);
3295
3296         /* Send Hardware Error to upper stack */
3297         return hci_recv_frame(hdev, skb);
3298 }
3299 EXPORT_SYMBOL(hci_reset_dev);
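
/* For reference, the three bytes injected above form a complete HCI
 * Hardware Error event as it would appear on the wire:
 *
 *	0x10 0x01 0x00
 *	 |    |    +--- hardware code (0x00 here; meaning is vendor-specific)
 *	 |    +-------- parameter total length (1)
 *	 +------------- event code HCI_EV_HARDWARE_ERROR (0x10)
 *
 * The event handler then schedules hdev->error_reset to recover.
 */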
3300
3301 /* Receive frame from HCI drivers */
3302 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3303 {
3304         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3305                       !test_bit(HCI_INIT, &hdev->flags))) {
3306                 kfree_skb(skb);
3307                 return -ENXIO;
3308         }
3309
3310         /* Incoming skb */
3311         bt_cb(skb)->incoming = 1;
3312
3313         /* Time stamp */
3314         __net_timestamp(skb);
3315
3316         skb_queue_tail(&hdev->rx_q, skb);
3317         queue_work(hdev->workqueue, &hdev->rx_work);
3318
3319         return 0;
3320 }
3321 EXPORT_SYMBOL(hci_recv_frame);
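
/* Sketch of the driver side of this interface: a transport would hand
 * received bytes to the core roughly as below (hypothetical variables
 * data/len/pkt_type; GFP_ATOMIC because RX paths often run in atomic
 * context):
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), data, len);
 *	bt_cb(skb)->pkt_type = pkt_type;
 *	return hci_recv_frame(hdev, skb);
 *
 * Note that hci_recv_frame() consumes the skb even on error.
 */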
3322
3323 /* ---- Interface to upper protocols ---- */
3324
3325 int hci_register_cb(struct hci_cb *cb)
3326 {
3327         BT_DBG("%p name %s", cb, cb->name);
3328
3329         mutex_lock(&hci_cb_list_lock);
3330         list_add_tail(&cb->list, &hci_cb_list);
3331         mutex_unlock(&hci_cb_list_lock);
3332
3333         return 0;
3334 }
3335 EXPORT_SYMBOL(hci_register_cb);
3336
3337 int hci_unregister_cb(struct hci_cb *cb)
3338 {
3339         BT_DBG("%p name %s", cb, cb->name);
3340
3341         mutex_lock(&hci_cb_list_lock);
3342         list_del(&cb->list);
3343         mutex_unlock(&hci_cb_list_lock);
3344
3345         return 0;
3346 }
3347 EXPORT_SYMBOL(hci_unregister_cb);
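
/* L2CAP and SCO are the in-tree users of this interface; a minimal
 * sketch of such a registration (hypothetical my_proto hooks; callbacks
 * left NULL are simply skipped when events are dispatched):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_cfm	= my_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */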
3348
3349 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3350 {
3351         int err;
3352
3353         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3354
3355         /* Time stamp */
3356         __net_timestamp(skb);
3357
3358         /* Send copy to monitor */
3359         hci_send_to_monitor(hdev, skb);
3360
3361         if (atomic_read(&hdev->promisc)) {
3362                 /* Send copy to the sockets */
3363                 hci_send_to_sock(hdev, skb);
3364         }
3365
3366         /* Get rid of skb owner, prior to sending to the driver. */
3367         skb_orphan(skb);
3368
3369         err = hdev->send(hdev, skb);
3370         if (err < 0) {
3371                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3372                 kfree_skb(skb);
3373         }
3374 }
3375
3376 /* Send HCI command */
3377 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3378                  const void *param)
3379 {
3380         struct sk_buff *skb;
3381
3382         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3383
3384         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3385         if (!skb) {
3386                 BT_ERR("%s no memory for command", hdev->name);
3387                 return -ENOMEM;
3388         }
3389
3390         /* Stand-alone HCI commands must be flagged as
3391          * single-command requests.
3392          */
3393         bt_cb(skb)->req.start = true;
3394
3395         skb_queue_tail(&hdev->cmd_q, skb);
3396         queue_work(hdev->workqueue, &hdev->cmd_work);
3397
3398         return 0;
3399 }
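
/* Usage examples: a parameterless command is sent as
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * while commands with parameters pass a packed command struct, e.g. a
 * general inquiry for 8 * 1.28 seconds (GIAC LAP 0x9e8b33, stored
 * little-endian):
 *
 *	struct hci_cp_inquiry cp = {
 *		.lap    = { 0x33, 0x8b, 0x9e },
 *		.length = 8,
 *	};
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 */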
3400
3401 /* Get data from the previously sent command */
3402 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3403 {
3404         struct hci_command_hdr *hdr;
3405
3406         if (!hdev->sent_cmd)
3407                 return NULL;
3408
3409         hdr = (void *) hdev->sent_cmd->data;
3410
3411         if (hdr->opcode != cpu_to_le16(opcode))
3412                 return NULL;
3413
3414         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3415
3416         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3417 }
3418
3419 /* Send ACL data */
3420 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3421 {
3422         struct hci_acl_hdr *hdr;
3423         int len = skb->len;
3424
3425         skb_push(skb, HCI_ACL_HDR_SIZE);
3426         skb_reset_transport_header(skb);
3427         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3428         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3429         hdr->dlen   = cpu_to_le16(len);
3430 }
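
/* hci_handle_pack() folds the 4 bits of packet-boundary/broadcast flags
 * into the top nibble of the 12-bit connection handle.  Worked example:
 * handle 0x0042 with flags ACL_START (0x02) packs to
 *
 *	0x0042 | (0x02 << 12) = 0x2042
 *
 * which hdr->handle then stores little-endian as the bytes 0x42 0x20.
 */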
3431
3432 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3433                           struct sk_buff *skb, __u16 flags)
3434 {
3435         struct hci_conn *conn = chan->conn;
3436         struct hci_dev *hdev = conn->hdev;
3437         struct sk_buff *list;
3438
3439         skb->len = skb_headlen(skb);
3440         skb->data_len = 0;
3441
3442         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3443
3444         switch (hdev->dev_type) {
3445         case HCI_BREDR:
3446                 hci_add_acl_hdr(skb, conn->handle, flags);
3447                 break;
3448         case HCI_AMP:
3449                 hci_add_acl_hdr(skb, chan->handle, flags);
3450                 break;
3451         default:
3452                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3453                 return;
3454         }
3455
3456         list = skb_shinfo(skb)->frag_list;
3457         if (!list) {
3458                 /* Non-fragmented */
3459                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3460
3461                 skb_queue_tail(queue, skb);
3462         } else {
3463                 /* Fragmented */
3464                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3465
3466                 skb_shinfo(skb)->frag_list = NULL;
3467
3468                 /* Queue all fragments atomically. We need spin_lock_bh
3469                  * here because with 6LoWPAN links this function can be
3470                  * called from softirq context, and taking a plain spin
3471                  * lock there could deadlock.
3472                  */
3473                 spin_lock_bh(&queue->lock);
3474
3475                 __skb_queue_tail(queue, skb);
3476
3477                 flags &= ~ACL_START;
3478                 flags |= ACL_CONT;
3479                 do {
3480                         skb = list; list = list->next;
3481
3482                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3483                         hci_add_acl_hdr(skb, conn->handle, flags);
3484
3485                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3486
3487                         __skb_queue_tail(queue, skb);
3488                 } while (list);
3489
3490                 spin_unlock_bh(&queue->lock);
3491         }
3492 }
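
/* The net effect for a three-fragment skb: the queue holds one packet
 * carrying the caller's start flags (ACL_START or ACL_START_NO_FLUSH)
 * followed by two packets re-flagged ACL_CONT, each with its own ACL
 * header and length, ready to be sent as independent HCI frames.
 */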
3493
3494 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3495 {
3496         struct hci_dev *hdev = chan->conn->hdev;
3497
3498         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3499
3500         hci_queue_acl(chan, &chan->data_q, skb, flags);
3501
3502         queue_work(hdev->workqueue, &hdev->tx_work);
3503 }
3504
3505 /* Send SCO data */
3506 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3507 {
3508         struct hci_dev *hdev = conn->hdev;
3509         struct hci_sco_hdr hdr;
3510
3511         BT_DBG("%s len %d", hdev->name, skb->len);
3512
3513         hdr.handle = cpu_to_le16(conn->handle);
3514         hdr.dlen   = skb->len;
3515
3516         skb_push(skb, HCI_SCO_HDR_SIZE);
3517         skb_reset_transport_header(skb);
3518         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3519
3520         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3521
3522         skb_queue_tail(&conn->data_q, skb);
3523         queue_work(hdev->workqueue, &hdev->tx_work);
3524 }
3525
3526 /* ---- HCI TX task (outgoing data) ---- */
3527
3528 /* HCI Connection scheduler */
3529 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3530                                      int *quote)
3531 {
3532         struct hci_conn_hash *h = &hdev->conn_hash;
3533         struct hci_conn *conn = NULL, *c;
3534         unsigned int num = 0, min = ~0;
3535
3536         /* We don't have to lock the device here. Connections are
3537          * always added and removed with the TX task disabled. */
3538
3539         rcu_read_lock();
3540
3541         list_for_each_entry_rcu(c, &h->list, list) {
3542                 if (c->type != type || skb_queue_empty(&c->data_q))
3543                         continue;
3544
3545                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3546                         continue;
3547
3548                 num++;
3549
3550                 if (c->sent < min) {
3551                         min  = c->sent;
3552                         conn = c;
3553                 }
3554
3555                 if (hci_conn_num(hdev, type) == num)
3556                         break;
3557         }
3558
3559         rcu_read_unlock();
3560
3561         if (conn) {
3562                 int cnt, q;
3563
3564                 switch (conn->type) {
3565                 case ACL_LINK:
3566                         cnt = hdev->acl_cnt;
3567                         break;
3568                 case SCO_LINK:
3569                 case ESCO_LINK:
3570                         cnt = hdev->sco_cnt;
3571                         break;
3572                 case LE_LINK:
3573                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3574                         break;
3575                 default:
3576                         cnt = 0;
3577                         BT_ERR("Unknown link type");
3578                 }
3579
3580                 q = cnt / num;
3581                 *quote = q ? q : 1;
3582         } else
3583                 *quote = 0;
3584
3585         BT_DBG("conn %p quote %d", conn, *quote);
3586         return conn;
3587 }
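
/* Worked example of the quota above: with 8 free ACL buffer credits
 * (cnt == 8) shared by num == 3 busy connections, the least-used one is
 * picked and granted quote = 8 / 3 = 2 frames; the "q ? q : 1" fallback
 * guarantees progress even when credits < connections.
 */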
3588
3589 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3590 {
3591         struct hci_conn_hash *h = &hdev->conn_hash;
3592         struct hci_conn *c;
3593
3594         BT_ERR("%s link tx timeout", hdev->name);
3595
3596         rcu_read_lock();
3597
3598         /* Kill stalled connections */
3599         list_for_each_entry_rcu(c, &h->list, list) {
3600                 if (c->type == type && c->sent) {
3601                         BT_ERR("%s killing stalled connection %pMR",
3602                                hdev->name, &c->dst);
3603                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3604                 }
3605         }
3606
3607         rcu_read_unlock();
3608 }
3609
3610 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3611                                       int *quote)
3612 {
3613         struct hci_conn_hash *h = &hdev->conn_hash;
3614         struct hci_chan *chan = NULL;
3615         unsigned int num = 0, min = ~0, cur_prio = 0;
3616         struct hci_conn *conn;
3617         int cnt, q, conn_num = 0;
3618
3619         BT_DBG("%s", hdev->name);
3620
3621         rcu_read_lock();
3622
3623         list_for_each_entry_rcu(conn, &h->list, list) {
3624                 struct hci_chan *tmp;
3625
3626                 if (conn->type != type)
3627                         continue;
3628
3629                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3630                         continue;
3631
3632                 conn_num++;
3633
3634                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3635                         struct sk_buff *skb;
3636
3637                         if (skb_queue_empty(&tmp->data_q))
3638                                 continue;
3639
3640                         skb = skb_peek(&tmp->data_q);
3641                         if (skb->priority < cur_prio)
3642                                 continue;
3643
3644                         if (skb->priority > cur_prio) {
3645                                 num = 0;
3646                                 min = ~0;
3647                                 cur_prio = skb->priority;
3648                         }
3649
3650                         num++;
3651
3652                         if (conn->sent < min) {
3653                                 min  = conn->sent;
3654                                 chan = tmp;
3655                         }
3656                 }
3657
3658                 if (hci_conn_num(hdev, type) == conn_num)
3659                         break;
3660         }
3661
3662         rcu_read_unlock();
3663
3664         if (!chan)
3665                 return NULL;
3666
3667         switch (chan->conn->type) {
3668         case ACL_LINK:
3669                 cnt = hdev->acl_cnt;
3670                 break;
3671         case AMP_LINK:
3672                 cnt = hdev->block_cnt;
3673                 break;
3674         case SCO_LINK:
3675         case ESCO_LINK:
3676                 cnt = hdev->sco_cnt;
3677                 break;
3678         case LE_LINK:
3679                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3680                 break;
3681         default:
3682                 cnt = 0;
3683                 BT_ERR("Unknown link type");
3684         }
3685
3686         q = cnt / num;
3687         *quote = q ? q : 1;
3688         BT_DBG("chan %p quote %d", chan, *quote);
3689         return chan;
3690 }
3691
3692 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3693 {
3694         struct hci_conn_hash *h = &hdev->conn_hash;
3695         struct hci_conn *conn;
3696         int num = 0;
3697
3698         BT_DBG("%s", hdev->name);
3699
3700         rcu_read_lock();
3701
3702         list_for_each_entry_rcu(conn, &h->list, list) {
3703                 struct hci_chan *chan;
3704
3705                 if (conn->type != type)
3706                         continue;
3707
3708                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3709                         continue;
3710
3711                 num++;
3712
3713                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3714                         struct sk_buff *skb;
3715
3716                         if (chan->sent) {
3717                                 chan->sent = 0;
3718                                 continue;
3719                         }
3720
3721                         if (skb_queue_empty(&chan->data_q))
3722                                 continue;
3723
3724                         skb = skb_peek(&chan->data_q);
3725                         if (skb->priority >= HCI_PRIO_MAX - 1)
3726                                 continue;
3727
3728                         skb->priority = HCI_PRIO_MAX - 1;
3729
3730                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3731                                skb->priority);
3732                 }
3733
3734                 if (hci_conn_num(hdev, type) == num)
3735                         break;
3736         }
3737
3738         rcu_read_unlock();
3740 }
3741
3742 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3743 {
3744         /* Calculate count of blocks used by this packet */
3745         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3746 }
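
/* Example: with hdev->block_len == 224, a 500 byte ACL skb occupies
 * DIV_ROUND_UP(500 - 4, 224) = DIV_ROUND_UP(496, 224) = 3 data blocks;
 * HCI_ACL_HDR_SIZE is the 4 byte handle + length header, which is not
 * counted against the block budget.
 */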
3747
3748 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3749 {
3750         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3751                 /* The ACL tx timeout must be longer than the maximum
3752                  * link supervision timeout (40.9 seconds). */
3753                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3754                                        HCI_ACL_TX_TIMEOUT))
3755                         hci_link_tx_to(hdev, ACL_LINK);
3756         }
3757 }
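
/* HCI_ACL_TX_TIMEOUT is 45 seconds, deliberately just above the 40.9
 * second ceiling mentioned above, so a link is only declared stalled
 * after the baseband supervision timeout must already have fired.
 */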
3758
3759 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3760 {
3761         unsigned int cnt = hdev->acl_cnt;
3762         struct hci_chan *chan;
3763         struct sk_buff *skb;
3764         int quote;
3765
3766         __check_timeout(hdev, cnt);
3767
3768         while (hdev->acl_cnt &&
3769                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3770                 u32 priority = (skb_peek(&chan->data_q))->priority;
3771                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3772                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3773                                skb->len, skb->priority);
3774
3775                         /* Stop if priority has changed */
3776                         if (skb->priority < priority)
3777                                 break;
3778
3779                         skb = skb_dequeue(&chan->data_q);
3780
3781                         hci_conn_enter_active_mode(chan->conn,
3782                                                    bt_cb(skb)->force_active);
3783
3784                         hci_send_frame(hdev, skb);
3785                         hdev->acl_last_tx = jiffies;
3786
3787                         hdev->acl_cnt--;
3788                         chan->sent++;
3789                         chan->conn->sent++;
3790                 }
3791         }
3792
3793         if (cnt != hdev->acl_cnt)
3794                 hci_prio_recalculate(hdev, ACL_LINK);
3795 }
3796
3797 static void hci_sched_acl_blk(struct hci_dev *hdev)
3798 {
3799         unsigned int cnt = hdev->block_cnt;
3800         struct hci_chan *chan;
3801         struct sk_buff *skb;
3802         int quote;
3803         u8 type;
3804
3805         __check_timeout(hdev, cnt);
3806
3807         BT_DBG("%s", hdev->name);
3808
3809         if (hdev->dev_type == HCI_AMP)
3810                 type = AMP_LINK;
3811         else
3812                 type = ACL_LINK;
3813
3814         while (hdev->block_cnt > 0 &&
3815                (chan = hci_chan_sent(hdev, type, &quote))) {
3816                 u32 priority = (skb_peek(&chan->data_q))->priority;
3817                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3818                         int blocks;
3819
3820                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3821                                skb->len, skb->priority);
3822
3823                         /* Stop if priority has changed */
3824                         if (skb->priority < priority)
3825                                 break;
3826
3827                         skb = skb_dequeue(&chan->data_q);
3828
3829                         blocks = __get_blocks(hdev, skb);
3830                         if (blocks > hdev->block_cnt)
3831                                 return;
3832
3833                         hci_conn_enter_active_mode(chan->conn,
3834                                                    bt_cb(skb)->force_active);
3835
3836                         hci_send_frame(hdev, skb);
3837                         hdev->acl_last_tx = jiffies;
3838
3839                         hdev->block_cnt -= blocks;
3840                         quote -= blocks;
3841
3842                         chan->sent += blocks;
3843                         chan->conn->sent += blocks;
3844                 }
3845         }
3846
3847         if (cnt != hdev->block_cnt)
3848                 hci_prio_recalculate(hdev, type);
3849 }
3850
3851 static void hci_sched_acl(struct hci_dev *hdev)
3852 {
3853         BT_DBG("%s", hdev->name);
3854
3855         /* Nothing to schedule on a BR/EDR controller without ACL links */
3856         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3857                 return;
3858
3859         /* Nothing to schedule on an AMP controller without AMP links */
3860         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3861                 return;
3862
3863         switch (hdev->flow_ctl_mode) {
3864         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3865                 hci_sched_acl_pkt(hdev);
3866                 break;
3867
3868         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3869                 hci_sched_acl_blk(hdev);
3870                 break;
3871         }
3872 }
3873
3874 /* Schedule SCO */
3875 static void hci_sched_sco(struct hci_dev *hdev)
3876 {
3877         struct hci_conn *conn;
3878         struct sk_buff *skb;
3879         int quote;
3880
3881         BT_DBG("%s", hdev->name);
3882
3883         if (!hci_conn_num(hdev, SCO_LINK))
3884                 return;
3885
3886         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3887                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3888                         BT_DBG("skb %p len %d", skb, skb->len);
3889                         hci_send_frame(hdev, skb);
3890
3891                         conn->sent++;
3892                         if (conn->sent == ~0)
3893                                 conn->sent = 0;
3894                 }
3895         }
3896 }
3897
3898 static void hci_sched_esco(struct hci_dev *hdev)
3899 {
3900         struct hci_conn *conn;
3901         struct sk_buff *skb;
3902         int quote;
3903
3904         BT_DBG("%s", hdev->name);
3905
3906         if (!hci_conn_num(hdev, ESCO_LINK))
3907                 return;
3908
3909         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3910                                                      &quote))) {
3911                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3912                         BT_DBG("skb %p len %d", skb, skb->len);
3913                         hci_send_frame(hdev, skb);
3914
3915                         conn->sent++;
3916                         if (conn->sent == ~0)
3917                                 conn->sent = 0;
3918                 }
3919         }
3920 }
3921
3922 static void hci_sched_le(struct hci_dev *hdev)
3923 {
3924         struct hci_chan *chan;
3925         struct sk_buff *skb;
3926         int quote, cnt, tmp;
3927
3928         BT_DBG("%s", hdev->name);
3929
3930         if (!hci_conn_num(hdev, LE_LINK))
3931                 return;
3932
3933         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3934                 /* The LE tx timeout must be longer than the maximum
3935                  * link supervision timeout (40.9 seconds). */
3936                 if (!hdev->le_cnt && hdev->le_pkts &&
3937                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3938                         hci_link_tx_to(hdev, LE_LINK);
3939         }
3940
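        /* Controllers that report no dedicated LE buffer pool via LE Read
         * Buffer Size (le_pkts == 0) share the BR/EDR ACL buffers, so fall
         * back to the ACL credit count for them.
         */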
3941         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3942         tmp = cnt;
3943         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3944                 u32 priority = (skb_peek(&chan->data_q))->priority;
3945                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3946                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3947                                skb->len, skb->priority);
3948
3949                         /* Stop if priority has changed */
3950                         if (skb->priority < priority)
3951                                 break;
3952
3953                         skb = skb_dequeue(&chan->data_q);
3954
3955                         hci_send_frame(hdev, skb);
3956                         hdev->le_last_tx = jiffies;
3957
3958                         cnt--;
3959                         chan->sent++;
3960                         chan->conn->sent++;
3961                 }
3962         }
3963
3964         if (hdev->le_pkts)
3965                 hdev->le_cnt = cnt;
3966         else
3967                 hdev->acl_cnt = cnt;
3968
3969         if (cnt != tmp)
3970                 hci_prio_recalculate(hdev, LE_LINK);
3971 }
3972
3973 static void hci_tx_work(struct work_struct *work)
3974 {
3975         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3976         struct sk_buff *skb;
3977
3978         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3979                hdev->sco_cnt, hdev->le_cnt);
3980
3981         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3982                 /* Schedule queues and send stuff to HCI driver */
3983                 hci_sched_acl(hdev);
3984                 hci_sched_sco(hdev);
3985                 hci_sched_esco(hdev);
3986                 hci_sched_le(hdev);
3987         }
3988
3989         /* Send next queued raw (unknown type) packet */
3990         while ((skb = skb_dequeue(&hdev->raw_q)))
3991                 hci_send_frame(hdev, skb);
3992 }
3993
3994 /* ----- HCI RX task (incoming data processing) ----- */
3995
3996 /* ACL data packet */
3997 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3998 {
3999         struct hci_acl_hdr *hdr = (void *) skb->data;
4000         struct hci_conn *conn;
4001         __u16 handle, flags;
4002
4003         skb_pull(skb, HCI_ACL_HDR_SIZE);
4004
4005         handle = __le16_to_cpu(hdr->handle);
4006         flags  = hci_flags(handle);
4007         handle = hci_handle(handle);
4008
4009         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4010                handle, flags);
4011
4012         hdev->stat.acl_rx++;
4013
4014         hci_dev_lock(hdev);
4015         conn = hci_conn_hash_lookup_handle(hdev, handle);
4016         hci_dev_unlock(hdev);
4017
4018         if (conn) {
4019                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4020
4021                 /* Send to upper protocol */
4022                 l2cap_recv_acldata(conn, skb, flags);
4023                 return;
4024         }
4025
4026         BT_ERR("%s ACL packet for unknown connection handle %d",
4027                hdev->name, handle);
4028
4029         kfree_skb(skb);
4030 }
4031
4032 /* SCO data packet */
4033 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4034 {
4035         struct hci_sco_hdr *hdr = (void *) skb->data;
4036         struct hci_conn *conn;
4037         __u16 handle;
4038
4039         skb_pull(skb, HCI_SCO_HDR_SIZE);
4040
4041         handle = __le16_to_cpu(hdr->handle);
4042
4043         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4044
4045         hdev->stat.sco_rx++;
4046
4047         hci_dev_lock(hdev);
4048         conn = hci_conn_hash_lookup_handle(hdev, handle);
4049         hci_dev_unlock(hdev);
4050
4051         if (conn) {
4052                 /* Send to upper protocol */
4053                 sco_recv_scodata(conn, skb);
4054                 return;
4055         }
4056
4057         BT_ERR("%s SCO packet for unknown connection handle %d",
4058                hdev->name, handle);
4059
4060         kfree_skb(skb);
4061 }
4062
4063 static bool hci_req_is_complete(struct hci_dev *hdev)
4064 {
4065         struct sk_buff *skb;
4066
4067         skb = skb_peek(&hdev->cmd_q);
4068         if (!skb)
4069                 return true;
4070
4071         return bt_cb(skb)->req.start;
4072 }
4073
4074 static void hci_resend_last(struct hci_dev *hdev)
4075 {
4076         struct hci_command_hdr *sent;
4077         struct sk_buff *skb;
4078         u16 opcode;
4079
4080         if (!hdev->sent_cmd)
4081                 return;
4082
4083         sent = (void *) hdev->sent_cmd->data;
4084         opcode = __le16_to_cpu(sent->opcode);
4085         if (opcode == HCI_OP_RESET)
4086                 return;
4087
4088         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4089         if (!skb)
4090                 return;
4091
4092         skb_queue_head(&hdev->cmd_q, skb);
4093         queue_work(hdev->workqueue, &hdev->cmd_work);
4094 }
4095
4096 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4097                           hci_req_complete_t *req_complete,
4098                           hci_req_complete_skb_t *req_complete_skb)
4099 {
4100         struct sk_buff *skb;
4101         unsigned long flags;
4102
4103         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4104
4105         /* If the completed command doesn't match the last one that was
4106          * sent, we need to do special handling of it.
4107          */
4108         if (!hci_sent_cmd_data(hdev, opcode)) {
4109                 /* Some CSR-based controllers generate a spontaneous
4110                  * reset complete event during init, after which any
4111                  * pending command will never be completed. In that
4112                  * case we need to resend whatever the last sent
4113                  * command was.
4114                  */
4115                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4116                         hci_resend_last(hdev);
4117
4118                 return;
4119         }
4120
4121         /* If the command succeeded and there are still more commands in
4122          * this request, the request is not yet complete.
4123          */
4124         if (!status && !hci_req_is_complete(hdev))
4125                 return;
4126
4127         /* If this was the last command in a request, the complete
4128          * callback is found in hdev->sent_cmd instead of the
4129          * command queue (hdev->cmd_q).
4130          */
4131         if (bt_cb(hdev->sent_cmd)->req.complete) {
4132                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4133                 return;
4134         }
4135
4136         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4137                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4138                 return;
4139         }
4140
4141         /* Remove all pending commands belonging to this request */
4142         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4143         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4144                 if (bt_cb(skb)->req.start) {
4145                         __skb_queue_head(&hdev->cmd_q, skb);
4146                         break;
4147                 }
4148
4149                 *req_complete = bt_cb(skb)->req.complete;
4150                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4151                 kfree_skb(skb);
4152         }
4153         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4154 }
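
/* The cmd_q layout this relies on: each request is a run of skbs in
 * which only the first has bt_cb(skb)->req.start set, e.g.
 *
 *	[start|cmd][cmd][cmd] [start|cmd] [start|cmd][cmd] ...
 *
 * so dequeueing until the next start-flagged skb discards exactly the
 * remainder of the current, now-failed, request.
 */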
4155
4156 static void hci_rx_work(struct work_struct *work)
4157 {
4158         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4159         struct sk_buff *skb;
4160
4161         BT_DBG("%s", hdev->name);
4162
4163         while ((skb = skb_dequeue(&hdev->rx_q))) {
4164                 /* Send copy to monitor */
4165                 hci_send_to_monitor(hdev, skb);
4166
4167                 if (atomic_read(&hdev->promisc)) {
4168                         /* Send copy to the sockets */
4169                         hci_send_to_sock(hdev, skb);
4170                 }
4171
4172                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4173                         kfree_skb(skb);
4174                         continue;
4175                 }
4176
4177                 if (test_bit(HCI_INIT, &hdev->flags)) {
4178                         /* Don't process data packets in this state. */
4179                         switch (bt_cb(skb)->pkt_type) {
4180                         case HCI_ACLDATA_PKT:
4181                         case HCI_SCODATA_PKT:
4182                                 kfree_skb(skb);
4183                                 continue;
4184                         }
4185                 }
4186
4187                 /* Process frame */
4188                 switch (bt_cb(skb)->pkt_type) {
4189                 case HCI_EVENT_PKT:
4190                         BT_DBG("%s Event packet", hdev->name);
4191                         hci_event_packet(hdev, skb);
4192                         break;
4193
4194                 case HCI_ACLDATA_PKT:
4195                         BT_DBG("%s ACL data packet", hdev->name);
4196                         hci_acldata_packet(hdev, skb);
4197                         break;
4198
4199                 case HCI_SCODATA_PKT:
4200                         BT_DBG("%s SCO data packet", hdev->name);
4201                         hci_scodata_packet(hdev, skb);
4202                         break;
4203
4204                 default:
4205                         kfree_skb(skb);
4206                         break;
4207                 }
4208         }
4209 }
4210
4211 static void hci_cmd_work(struct work_struct *work)
4212 {
4213         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4214         struct sk_buff *skb;
4215
4216         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4217                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4218
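        /* cmd_cnt is the command-flow gate: the core keeps at most one
         * command in flight, and the event handlers re-arm the credit when
         * the controller signals room via the Num_HCI_Command_Packets field
         * of Command Complete / Command Status events.
         */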
4219         /* Send queued commands */
4220         if (atomic_read(&hdev->cmd_cnt)) {
4221                 skb = skb_dequeue(&hdev->cmd_q);
4222                 if (!skb)
4223                         return;
4224
4225                 kfree_skb(hdev->sent_cmd);
4226
4227                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4228                 if (hdev->sent_cmd) {
4229                         atomic_dec(&hdev->cmd_cnt);
4230                         hci_send_frame(hdev, skb);
4231                         if (test_bit(HCI_RESET, &hdev->flags))
4232                                 cancel_delayed_work(&hdev->cmd_timer);
4233                         else
4234                                 schedule_delayed_work(&hdev->cmd_timer,
4235                                                       HCI_CMD_TIMEOUT);
4236                 } else {
4237                         skb_queue_head(&hdev->cmd_q, skb);
4238                         queue_work(hdev->workqueue, &hdev->cmd_work);
4239                 }
4240         }
4241 }