1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
38 struct sco_param {
39         u16 pkt_type;
40         u16 max_latency;
41         u8  retrans_effort;
42 };
43
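/* Synchronous connection parameter sets. hci_setup_sync() below walks
 * these tables one row per connection attempt (conn->attempt - 1) and
 * gives up once the table is exhausted. The row labels (S3/S2/S1/D1/D0
 * and T2/T1) follow the eSCO/SCO settings recommended by the Hands-Free
 * Profile.
 */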
44 static const struct sco_param esco_param_cvsd[] = {
45         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
46         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
47         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
48         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
49         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
50 };
51
52 static const struct sco_param sco_param_cvsd[] = {
53         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
54         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
55 };
56
57 static const struct sco_param esco_param_msbc[] = {
58         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
59         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
60 };
61
62 static void hci_le_create_connection_cancel(struct hci_conn *conn)
63 {
64         hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
65 }
66
67 static void hci_acl_create_connection(struct hci_conn *conn)
68 {
69         struct hci_dev *hdev = conn->hdev;
70         struct inquiry_entry *ie;
71         struct hci_cp_create_conn cp;
72
73         BT_DBG("hcon %p", conn);
74
75         conn->state = BT_CONNECT;
76         conn->out = true;
77         conn->role = HCI_ROLE_MASTER;
78
79         conn->attempt++;
80
81         conn->link_policy = hdev->link_policy;
82
83         memset(&cp, 0, sizeof(cp));
84         bacpy(&cp.bdaddr, &conn->dst);
85         cp.pscan_rep_mode = 0x02;
86
87         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
88         if (ie) {
89                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
90                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
91                         cp.pscan_mode     = ie->data.pscan_mode;
92                         cp.clock_offset   = ie->data.clock_offset |
93                                             cpu_to_le16(0x8000);
94                 }
95
96                 memcpy(conn->dev_class, ie->data.dev_class, 3);
97                 if (ie->data.ssp_mode > 0)
98                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
99         }
100
101         cp.pkt_type = cpu_to_le16(conn->pkt_type);
102         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
103                 cp.role_switch = 0x01;
104         else
105                 cp.role_switch = 0x00;
106
107         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
108 }
109
110 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
111 {
112         struct hci_cp_create_conn_cancel cp;
113
114         BT_DBG("hcon %p", conn);
115
116         if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
117                 return;
118
119         bacpy(&cp.bdaddr, &conn->dst);
120         hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
121 }
122
123 static void hci_reject_sco(struct hci_conn *conn)
124 {
125         struct hci_cp_reject_sync_conn_req cp;
126
127         cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
128         bacpy(&cp.bdaddr, &conn->dst);
129
130         hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
131 }
132
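/* Move the connection to BT_DISCONN and queue an HCI Disconnect with the
 * given reason code. Returns the result of queueing the command.
 */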
133 int hci_disconnect(struct hci_conn *conn, __u8 reason)
134 {
135         struct hci_cp_disconnect cp;
136
137         BT_DBG("hcon %p", conn);
138
139         /* When we are the master of an established connection and it enters
140          * the disconnect timeout, go ahead and try to read the current
141          * clock offset. Processing of the result is done within the event
142          * handling and the hci_clock_offset_evt function.
143          */
144         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
145                 struct hci_dev *hdev = conn->hdev;
146                 struct hci_cp_read_clock_offset clkoff_cp;
147
148                 clkoff_cp.handle = cpu_to_le16(conn->handle);
149                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
150                              &clkoff_cp);
151         }
152
153         conn->state = BT_DISCONN;
154
155         cp.handle = cpu_to_le16(conn->handle);
156         cp.reason = reason;
157         return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
158 }
159
160 static void hci_amp_disconn(struct hci_conn *conn)
161 {
162         struct hci_cp_disconn_phy_link cp;
163
164         BT_DBG("hcon %p", conn);
165
166         conn->state = BT_DISCONN;
167
168         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
169         cp.reason = hci_proto_disconn_ind(conn);
170         hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
171                      sizeof(cp), &cp);
172 }
173
174 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
175 {
176         struct hci_dev *hdev = conn->hdev;
177         struct hci_cp_add_sco cp;
178
179         BT_DBG("hcon %p", conn);
180
181         conn->state = BT_CONNECT;
182         conn->out = true;
183
184         conn->attempt++;
185
186         cp.handle   = cpu_to_le16(handle);
187         cp.pkt_type = cpu_to_le16(conn->pkt_type);
188
189         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
190 }
191
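/* Set up a synchronous (eSCO/SCO) connection on top of an existing ACL
 * link. The air mode in conn->setting picks the parameter table
 * (mSBC/transparent vs. CVSD, eSCO vs. legacy SCO) and the current
 * attempt number picks the row. Returns false when there is no
 * parameter set left to try or the command could not be sent.
 */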
192 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
193 {
194         struct hci_dev *hdev = conn->hdev;
195         struct hci_cp_setup_sync_conn cp;
196         const struct sco_param *param;
197
198         BT_DBG("hcon %p", conn);
199
200         conn->state = BT_CONNECT;
201         conn->out = true;
202
203         conn->attempt++;
204
205         cp.handle   = cpu_to_le16(handle);
206
207         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
208         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
209         cp.voice_setting  = cpu_to_le16(conn->setting);
210
211         switch (conn->setting & SCO_AIRMODE_MASK) {
212         case SCO_AIRMODE_TRANSP:
213                 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
214                         return false;
215                 param = &esco_param_msbc[conn->attempt - 1];
216                 break;
217         case SCO_AIRMODE_CVSD:
218                 if (lmp_esco_capable(conn->link)) {
219                         if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
220                                 return false;
221                         param = &esco_param_cvsd[conn->attempt - 1];
222                 } else {
223                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
224                                 return false;
225                         param = &sco_param_cvsd[conn->attempt - 1];
226                 }
227                 break;
228         default:
229                 return false;
230         }
231
232         cp.retrans_effort = param->retrans_effort;
233         cp.pkt_type = __cpu_to_le16(param->pkt_type);
234         cp.max_latency = __cpu_to_le16(param->max_latency);
235
236         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
237                 return false;
238
239         return true;
240 }
241
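/* Request new LE connection parameters for an established connection.
 * The values are also cached in the stored connection parameters (if
 * any) so later connections pick them up. Returns 0x01 when stored
 * parameters were found and updated, 0x00 otherwise.
 */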
242 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
243                       u16 to_multiplier)
244 {
245         struct hci_dev *hdev = conn->hdev;
246         struct hci_conn_params *params;
247         struct hci_cp_le_conn_update cp;
248
249         hci_dev_lock(hdev);
250
251         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
252         if (params) {
253                 params->conn_min_interval = min;
254                 params->conn_max_interval = max;
255                 params->conn_latency = latency;
256                 params->supervision_timeout = to_multiplier;
257         }
258
259         hci_dev_unlock(hdev);
260
261         memset(&cp, 0, sizeof(cp));
262         cp.handle               = cpu_to_le16(conn->handle);
263         cp.conn_interval_min    = cpu_to_le16(min);
264         cp.conn_interval_max    = cpu_to_le16(max);
265         cp.conn_latency         = cpu_to_le16(latency);
266         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
267         cp.min_ce_len           = cpu_to_le16(0x0000);
268         cp.max_ce_len           = cpu_to_le16(0x0000);
269
270         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
271
272         if (params)
273                 return 0x01;
274
275         return 0x00;
276 }
277
278 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
279                       __u8 ltk[16])
280 {
281         struct hci_dev *hdev = conn->hdev;
282         struct hci_cp_le_start_enc cp;
283
284         BT_DBG("hcon %p", conn);
285
286         memset(&cp, 0, sizeof(cp));
287
288         cp.handle = cpu_to_le16(conn->handle);
289         cp.rand = rand;
290         cp.ediv = ediv;
291         memcpy(cp.ltk, ltk, sizeof(cp.ltk));
292
293         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
294 }
295
296 /* Device _must_ be locked */
297 void hci_sco_setup(struct hci_conn *conn, __u8 status)
298 {
299         struct hci_conn *sco = conn->link;
300
301         if (!sco)
302                 return;
303
304         BT_DBG("hcon %p", conn);
305
306         if (!status) {
307                 if (lmp_esco_capable(conn->hdev))
308                         hci_setup_sync(sco, conn->handle);
309                 else
310                         hci_add_sco(sco, conn->handle);
311         } else {
312                 hci_connect_cfm(sco, status);
313                 hci_conn_del(sco);
314         }
315 }
316
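/* Deferred disconnect work, typically scheduled from hci_conn_drop()
 * once the last user reference goes away. Depending on the connection
 * state this cancels an outgoing connection attempt, rejects an
 * incoming SCO request, or tears down the established link.
 */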
317 static void hci_conn_timeout(struct work_struct *work)
318 {
319         struct hci_conn *conn = container_of(work, struct hci_conn,
320                                              disc_work.work);
321         int refcnt = atomic_read(&conn->refcnt);
322
323         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
324
325         WARN_ON(refcnt < 0);
326
327         /* FIXME: It was observed that in a failed pairing scenario the refcnt
328          * drops below 0. This is probably because l2cap_conn_del calls
329          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
330          * dropped. After that loop hci_chan_del is called, which also drops
331          * conn. For now, keep the ACL alive if refcnt is greater than 0,
332          * otherwise drop it.
333          */
334         if (refcnt > 0)
335                 return;
336
337         switch (conn->state) {
338         case BT_CONNECT:
339         case BT_CONNECT2:
340                 if (conn->out) {
341                         if (conn->type == ACL_LINK)
342                                 hci_acl_create_connection_cancel(conn);
343                         else if (conn->type == LE_LINK)
344                                 hci_le_create_connection_cancel(conn);
345                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
346                         hci_reject_sco(conn);
347                 }
348                 break;
349         case BT_CONFIG:
350         case BT_CONNECTED:
351                 if (conn->type == AMP_LINK) {
352                         hci_amp_disconn(conn);
353                 } else {
354                         __u8 reason = hci_proto_disconn_ind(conn);
355                         hci_disconnect(conn, reason);
356                 }
357                 break;
358         default:
359                 conn->state = BT_CLOSED;
360                 break;
361         }
362 }
363
364 /* Enter sniff mode */
365 static void hci_conn_idle(struct work_struct *work)
366 {
367         struct hci_conn *conn = container_of(work, struct hci_conn,
368                                              idle_work.work);
369         struct hci_dev *hdev = conn->hdev;
370
371         BT_DBG("hcon %p mode %d", conn, conn->mode);
372
373         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
374                 return;
375
376         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
377                 return;
378
379         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
380                 struct hci_cp_sniff_subrate cp;
381                 cp.handle             = cpu_to_le16(conn->handle);
382                 cp.max_latency        = cpu_to_le16(0);
383                 cp.min_remote_timeout = cpu_to_le16(0);
384                 cp.min_local_timeout  = cpu_to_le16(0);
385                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
386         }
387
388         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
389                 struct hci_cp_sniff_mode cp;
390                 cp.handle       = cpu_to_le16(conn->handle);
391                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
392                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
393                 cp.attempt      = cpu_to_le16(4);
394                 cp.timeout      = cpu_to_le16(1);
395                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
396         }
397 }
398
399 static void hci_conn_auto_accept(struct work_struct *work)
400 {
401         struct hci_conn *conn = container_of(work, struct hci_conn,
402                                              auto_accept_work.work);
403
404         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
405                      &conn->dst);
406 }
407
408 static void le_conn_timeout(struct work_struct *work)
409 {
410         struct hci_conn *conn = container_of(work, struct hci_conn,
411                                              le_conn_timeout.work);
412         struct hci_dev *hdev = conn->hdev;
413
414         BT_DBG("");
415
416         /* We could end up here due to having done directed advertising,
417          * so clean up the state if necessary. This should however only
418          * happen with broken hardware or if low duty cycle was used
419          * (which doesn't have a timeout of its own).
420          */
421         if (conn->role == HCI_ROLE_SLAVE) {
422                 u8 enable = 0x00;
423                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
424                              &enable);
425                 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
426                 return;
427         }
428
429         hci_le_create_connection_cancel(conn);
430 }
431
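/* Allocate and initialize a new hci_conn object, set the type specific
 * packet types and default parameters, add it to the connection hash
 * and register its sysfs entry. Returns NULL on allocation failure.
 */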
432 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
433                               u8 role)
434 {
435         struct hci_conn *conn;
436
437         BT_DBG("%s dst %pMR", hdev->name, dst);
438
439         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
440         if (!conn)
441                 return NULL;
442
443         bacpy(&conn->dst, dst);
444         bacpy(&conn->src, &hdev->bdaddr);
445         conn->hdev  = hdev;
446         conn->type  = type;
447         conn->role  = role;
448         conn->mode  = HCI_CM_ACTIVE;
449         conn->state = BT_OPEN;
450         conn->auth_type = HCI_AT_GENERAL_BONDING;
451         conn->io_capability = hdev->io_capability;
452         conn->remote_auth = 0xff;
453         conn->key_type = 0xff;
454         conn->rssi = HCI_RSSI_INVALID;
455         conn->tx_power = HCI_TX_POWER_INVALID;
456         conn->max_tx_power = HCI_TX_POWER_INVALID;
457
458         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
459         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
460
461         if (conn->role == HCI_ROLE_MASTER)
462                 conn->out = true;
463
464         switch (type) {
465         case ACL_LINK:
466                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
467                 break;
468         case LE_LINK:
469                 /* conn->src should reflect the local identity address */
470                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
471                 break;
472         case SCO_LINK:
473                 if (lmp_esco_capable(hdev))
474                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
475                                         (hdev->esco_type & EDR_ESCO_MASK);
476                 else
477                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
478                 break;
479         case ESCO_LINK:
480                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
481                 break;
482         }
483
484         skb_queue_head_init(&conn->data_q);
485
486         INIT_LIST_HEAD(&conn->chan_list);
487
488         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
489         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
490         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
491         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
492
493         atomic_set(&conn->refcnt, 0);
494
495         hci_dev_hold(hdev);
496
497         hci_conn_hash_add(hdev, conn);
498         if (hdev->notify)
499                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
500
501         hci_conn_init_sysfs(conn);
502
503         return conn;
504 }
505
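/* Tear down a connection object: cancel pending work, return unacked
 * packet credits to the controller counters, unlink a paired SCO/ACL
 * connection, flush its channels and drop the references taken in
 * hci_conn_add().
 */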
506 int hci_conn_del(struct hci_conn *conn)
507 {
508         struct hci_dev *hdev = conn->hdev;
509
510         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
511
512         cancel_delayed_work_sync(&conn->disc_work);
513         cancel_delayed_work_sync(&conn->auto_accept_work);
514         cancel_delayed_work_sync(&conn->idle_work);
515
516         if (conn->type == ACL_LINK) {
517                 struct hci_conn *sco = conn->link;
518                 if (sco)
519                         sco->link = NULL;
520
521                 /* Unacked frames */
522                 hdev->acl_cnt += conn->sent;
523         } else if (conn->type == LE_LINK) {
524                 cancel_delayed_work(&conn->le_conn_timeout);
525
526                 if (hdev->le_pkts)
527                         hdev->le_cnt += conn->sent;
528                 else
529                         hdev->acl_cnt += conn->sent;
530         } else {
531                 struct hci_conn *acl = conn->link;
532                 if (acl) {
533                         acl->link = NULL;
534                         hci_conn_drop(acl);
535                 }
536         }
537
538         hci_chan_list_flush(conn);
539
540         if (conn->amp_mgr)
541                 amp_mgr_put(conn->amp_mgr);
542
543         hci_conn_hash_del(hdev, conn);
544         if (hdev->notify)
545                 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
546
547         skb_queue_purge(&conn->data_q);
548
549         hci_conn_del_sysfs(conn);
550
551         debugfs_remove_recursive(conn->debugfs);
552
553         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
554                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
555
556         hci_dev_put(hdev);
557
558         hci_conn_put(conn);
559
560         return 0;
561 }
562
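/* Pick the local BR/EDR controller to use for a connection: when a
 * source address is given, the controller owning that address is used;
 * otherwise the first powered-up controller whose address differs from
 * the destination is chosen. The returned hdev has its reference count
 * raised.
 */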
563 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
564 {
565         int use_src = bacmp(src, BDADDR_ANY);
566         struct hci_dev *hdev = NULL, *d;
567
568         BT_DBG("%pMR -> %pMR", src, dst);
569
570         read_lock(&hci_dev_list_lock);
571
572         list_for_each_entry(d, &hci_dev_list, list) {
573                 if (!test_bit(HCI_UP, &d->flags) ||
574                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
575                     d->dev_type != HCI_BREDR)
576                         continue;
577
578                 /* Simple routing:
579                  *   No source address - find interface with bdaddr != dst
580                  *   Source address    - find interface with bdaddr == src
581                  */
582
583                 if (use_src) {
584                         if (!bacmp(&d->bdaddr, src)) {
585                                 hdev = d; break;
586                         }
587                 } else {
588                         if (bacmp(&d->bdaddr, dst)) {
589                                 hdev = d; break;
590                         }
591                 }
592         }
593
594         if (hdev)
595                 hdev = hci_dev_hold(hdev);
596
597         read_unlock(&hci_dev_list_lock);
598         return hdev;
599 }
600 EXPORT_SYMBOL(hci_get_route);
601
602 /* This function requires the caller holds hdev->lock */
603 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
604 {
605         struct hci_dev *hdev = conn->hdev;
606         struct hci_conn_params *params;
607
608         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
609                                            conn->dst_type);
610         if (params && params->conn) {
611                 hci_conn_drop(params->conn);
612                 hci_conn_put(params->conn);
613                 params->conn = NULL;
614         }
615
616         conn->state = BT_CLOSED;
617
618         mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
619                             status);
620
621         hci_connect_cfm(conn, status);
622
623         hci_conn_del(conn);
624
625         /* Since we may have temporarily stopped the background scanning in
626          * favor of connection establishment, we should restart it.
627          */
628         hci_update_background_scan(hdev);
629
630         /* Re-enable advertising in case this was a failed connection
631          * attempt as a peripheral.
632          */
633         mgmt_reenable_advertising(hdev);
634 }
635
636 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
637 {
638         struct hci_conn *conn;
639
640         if (status == 0)
641                 return;
642
643         BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
644                status);
645
646         hci_dev_lock(hdev);
647
648         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
649         if (!conn)
650                 goto done;
651
652         hci_le_conn_failed(conn, status);
653
654 done:
655         hci_dev_unlock(hdev);
656 }
657
658 static void hci_req_add_le_create_conn(struct hci_request *req,
659                                        struct hci_conn *conn)
660 {
661         struct hci_cp_le_create_conn cp;
662         struct hci_dev *hdev = conn->hdev;
663         u8 own_addr_type;
664
665         memset(&cp, 0, sizeof(cp));
666
667         /* Update random address, but set require_privacy to false so
668          * that we never connect with a non-resolvable address.
669          */
670         if (hci_update_random_address(req, false, &own_addr_type))
671                 return;
672
673         cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
674         cp.scan_window = cpu_to_le16(hdev->le_scan_window);
675         bacpy(&cp.peer_addr, &conn->dst);
676         cp.peer_addr_type = conn->dst_type;
677         cp.own_address_type = own_addr_type;
678         cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
679         cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
680         cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
681         cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
682         cp.min_ce_len = cpu_to_le16(0x0000);
683         cp.max_ce_len = cpu_to_le16(0x0000);
684
685         hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
686
687         conn->state = BT_CONNECT;
688 }
689
690 static void hci_req_directed_advertising(struct hci_request *req,
691                                          struct hci_conn *conn)
692 {
693         struct hci_dev *hdev = req->hdev;
694         struct hci_cp_le_set_adv_param cp;
695         u8 own_addr_type;
696         u8 enable;
697
698         /* Clear the HCI_LE_ADV bit temporarily so that
699          * hci_update_random_address knows that it's safe to go ahead
700          * and write a new random address. The flag will be set back on
701          * as soon as the SET_ADV_ENABLE HCI command completes.
702          */
703         hci_dev_clear_flag(hdev, HCI_LE_ADV);
704
705         /* Set require_privacy to false so that the remote device has a
706          * chance of identifying us.
707          */
708         if (hci_update_random_address(req, false, &own_addr_type) < 0)
709                 return;
710
711         memset(&cp, 0, sizeof(cp));
712         cp.type = LE_ADV_DIRECT_IND;
713         cp.own_address_type = own_addr_type;
714         cp.direct_addr_type = conn->dst_type;
715         bacpy(&cp.direct_addr, &conn->dst);
716         cp.channel_map = hdev->le_adv_channel_map;
717
718         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
719
720         enable = 0x01;
721         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
722
723         conn->state = BT_CONNECT;
724 }
725
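/* Establish (or reuse) an outgoing LE connection. An existing hci_conn
 * to the same address is reused with an updated pending_sec_level, a
 * second concurrent connection attempt is rejected with -EBUSY, and a
 * known identity address with a cached RPA is translated before the
 * LE Create Connection (or, for slave role, directed advertising) is
 * queued.
 *
 * Hypothetical usage sketch (illustrative only; callers are expected to
 * hold hdev->lock):
 *
 *      hci_dev_lock(hdev);
 *      conn = hci_connect_le(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *                            BT_SECURITY_LOW, HCI_LE_CONN_TIMEOUT,
 *                            HCI_ROLE_MASTER);
 *      hci_dev_unlock(hdev);
 *      if (IS_ERR(conn))
 *              return PTR_ERR(conn);
 */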
726 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
727                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
728                                 u8 role)
729 {
730         struct hci_conn_params *params;
731         struct hci_conn *conn;
732         struct smp_irk *irk;
733         struct hci_request req;
734         int err;
735
736         /* Let's make sure that LE is enabled. */
737         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
738                 if (lmp_le_capable(hdev))
739                         return ERR_PTR(-ECONNREFUSED);
740
741                 return ERR_PTR(-EOPNOTSUPP);
742         }
743
744         /* Some devices send ATT messages as soon as the physical link is
745          * established. To be able to handle these ATT messages, the user-
746          * space first establishes the connection and then starts the pairing
747          * process.
748          *
749          * So if a hci_conn object already exists for the following connection
750          * attempt, we simply update the pending_sec_level field and
751          * return the object found.
752          */
753         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
754         if (conn) {
755                 conn->pending_sec_level = sec_level;
756                 goto done;
757         }
758
759         /* Since the controller supports only one LE connection attempt at a
760          * time, we return -EBUSY if there is any connection attempt running.
761          */
762         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
763         if (conn)
764                 return ERR_PTR(-EBUSY);
765
766         /* When given an identity address with existing identity
767          * resolving key, the connection needs to be established
768          * to a resolvable random address.
769          *
770          * This uses the cached random resolvable address from
771          * a previous scan. When no cached address is available,
772          * try connecting to the identity address instead.
773          *
774          * Storing the resolvable random address is required here
775          * to handle connection failures. The address will later
776          * be resolved back into the original identity address
777          * from the connect request.
778          */
779         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
780         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
781                 dst = &irk->rpa;
782                 dst_type = ADDR_LE_DEV_RANDOM;
783         }
784
785         conn = hci_conn_add(hdev, LE_LINK, dst, role);
786         if (!conn)
787                 return ERR_PTR(-ENOMEM);
788
789         conn->dst_type = dst_type;
790         conn->sec_level = BT_SECURITY_LOW;
791         conn->pending_sec_level = sec_level;
792         conn->conn_timeout = conn_timeout;
793
794         hci_req_init(&req, hdev);
795
796         /* Disable advertising if we're active. For master role
797          * connections, most controllers will refuse to connect if
798          * advertising is enabled, and for slave role connections we
799          * have to disable it anyway in order to start directed
800          * advertising.
801          */
802         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
803                 u8 enable = 0x00;
804                 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
805                             &enable);
806         }
807
808         /* If requested to connect as slave, use directed advertising */
809         if (conn->role == HCI_ROLE_SLAVE) {
810                 /* If we're actively scanning, most controllers are unable
811                  * to initiate advertising. Simply reject the attempt.
812                  */
813                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
814                     hdev->le_scan_type == LE_SCAN_ACTIVE) {
815                         skb_queue_purge(&req.cmd_q);
816                         hci_conn_del(conn);
817                         return ERR_PTR(-EBUSY);
818                 }
819
820                 hci_req_directed_advertising(&req, conn);
821                 goto create_conn;
822         }
823
824         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
825         if (params) {
826                 conn->le_conn_min_interval = params->conn_min_interval;
827                 conn->le_conn_max_interval = params->conn_max_interval;
828                 conn->le_conn_latency = params->conn_latency;
829                 conn->le_supv_timeout = params->supervision_timeout;
830         } else {
831                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
832                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
833                 conn->le_conn_latency = hdev->le_conn_latency;
834                 conn->le_supv_timeout = hdev->le_supv_timeout;
835         }
836
837         /* If the controller is scanning, we stop it since some controllers are
838          * not able to scan and connect at the same time. Also set the
839          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
840          * handler for scan disabling knows to set the correct discovery
841          * state.
842          */
843         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
844                 hci_req_add_le_scan_disable(&req);
845                 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
846         }
847
848         hci_req_add_le_create_conn(&req, conn);
849
850 create_conn:
851         err = hci_req_run(&req, create_le_conn_complete);
852         if (err) {
853                 hci_conn_del(conn);
854                 return ERR_PTR(err);
855         }
856
857 done:
858         hci_conn_hold(conn);
859         return conn;
860 }
861
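/* Look up or create the ACL connection to the given address, take a
 * reference on it and, unless the link is already being set up, kick
 * off the Create Connection procedure with the requested security level
 * and authentication type.
 */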
862 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
863                                  u8 sec_level, u8 auth_type)
864 {
865         struct hci_conn *acl;
866
867         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
868                 if (lmp_bredr_capable(hdev))
869                         return ERR_PTR(-ECONNREFUSED);
870
871                 return ERR_PTR(-EOPNOTSUPP);
872         }
873
874         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
875         if (!acl) {
876                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
877                 if (!acl)
878                         return ERR_PTR(-ENOMEM);
879         }
880
881         hci_conn_hold(acl);
882
883         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
884                 acl->sec_level = BT_SECURITY_LOW;
885                 acl->pending_sec_level = sec_level;
886                 acl->auth_type = auth_type;
887                 hci_acl_create_connection(acl);
888         }
889
890         return acl;
891 }
892
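/* Create a SCO or eSCO connection. The audio link always rides on top
 * of an ACL link, so that one is set up (or reused) first; the actual
 * synchronous setup is deferred until the ACL is connected and any
 * pending sniff mode change has completed.
 */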
893 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
894                                  __u16 setting)
895 {
896         struct hci_conn *acl;
897         struct hci_conn *sco;
898
899         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
900         if (IS_ERR(acl))
901                 return acl;
902
903         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
904         if (!sco) {
905                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
906                 if (!sco) {
907                         hci_conn_drop(acl);
908                         return ERR_PTR(-ENOMEM);
909                 }
910         }
911
912         acl->link = sco;
913         sco->link = acl;
914
915         hci_conn_hold(sco);
916
917         sco->setting = setting;
918
919         if (acl->state == BT_CONNECTED &&
920             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
921                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
922                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
923
924                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
925                         /* defer SCO setup until mode change completed */
926                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
927                         return sco;
928                 }
929
930                 hci_sco_setup(acl, 0x00);
931         }
932
933         return sco;
934 }
935
936 /* Check link security requirement */
937 int hci_conn_check_link_mode(struct hci_conn *conn)
938 {
939         BT_DBG("hcon %p", conn);
940
941         /* In Secure Connections Only mode, it is required that Secure
942          * Connections is used and the link is encrypted with AES-CCM
943          * using a P-256 authenticated combination key.
944          */
945         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
946                 if (!hci_conn_sc_enabled(conn) ||
947                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
948                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
949                         return 0;
950         }
951
952         if (hci_conn_ssp_enabled(conn) &&
953             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
954                 return 0;
955
956         return 1;
957 }
958
959 /* Authenticate remote device */
960 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
961 {
962         BT_DBG("hcon %p", conn);
963
964         if (conn->pending_sec_level > sec_level)
965                 sec_level = conn->pending_sec_level;
966
967         if (sec_level > conn->sec_level)
968                 conn->pending_sec_level = sec_level;
969         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
970                 return 1;
971
972         /* Make sure we preserve an existing MITM requirement */
973         auth_type |= (conn->auth_type & 0x01);
974
975         conn->auth_type = auth_type;
976
977         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
978                 struct hci_cp_auth_requested cp;
979
980                 cp.handle = cpu_to_le16(conn->handle);
981                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
982                              sizeof(cp), &cp);
983
984                 /* If we're already encrypted set the REAUTH_PEND flag,
985                  * otherwise set the ENCRYPT_PEND.
986                  */
987                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
988                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
989                 else
990                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
991         }
992
993         return 0;
994 }
995
996 /* Encrypt the link */
997 static void hci_conn_encrypt(struct hci_conn *conn)
998 {
999         BT_DBG("hcon %p", conn);
1000
1001         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1002                 struct hci_cp_set_conn_encrypt cp;
1003                 cp.handle  = cpu_to_le16(conn->handle);
1004                 cp.encrypt = 0x01;
1005                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1006                              &cp);
1007         }
1008 }
1009
1010 /* Enable security */
1011 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1012                       bool initiator)
1013 {
1014         BT_DBG("hcon %p", conn);
1015
1016         if (conn->type == LE_LINK)
1017                 return smp_conn_security(conn, sec_level);
1018
1019         /* For SDP we don't need the link key. */
1020         if (sec_level == BT_SECURITY_SDP)
1021                 return 1;
1022
1023         /* For non-2.1 devices and a low security level we don't need the
1024            link key. */
1025         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1026                 return 1;
1027
1028         /* For other security levels we need the link key. */
1029         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1030                 goto auth;
1031
1032         /* An authenticated FIPS-approved combination key has sufficient
1033          * security for security level 4. */
1034         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1035             sec_level == BT_SECURITY_FIPS)
1036                 goto encrypt;
1037
1038         /* An authenticated combination key has sufficient security for
1039            security level 3. */
1040         if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1041              conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1042             sec_level == BT_SECURITY_HIGH)
1043                 goto encrypt;
1044
1045         /* An unauthenticated combination key has sufficient security for
1046            security levels 1 and 2. */
1047         if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1048              conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1049             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1050                 goto encrypt;
1051
1052         /* A combination key always has sufficient security for security
1053            levels 1 and 2. The high security level requires that the
1054            combination key was generated using the maximum PIN code
1055            length (16). This applies to pre-2.1 units. */
1056         if (conn->key_type == HCI_LK_COMBINATION &&
1057             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1058              conn->pin_length == 16))
1059                 goto encrypt;
1060
1061 auth:
1062         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1063                 return 0;
1064
1065         if (initiator)
1066                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1067
1068         if (!hci_conn_auth(conn, sec_level, auth_type))
1069                 return 0;
1070
1071 encrypt:
1072         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1073                 return 1;
1074
1075         hci_conn_encrypt(conn);
1076         return 0;
1077 }
1078 EXPORT_SYMBOL(hci_conn_security);
1079
1080 /* Check secure link requirement */
1081 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1082 {
1083         BT_DBG("hcon %p", conn);
1084
1085         /* Accept if non-secure or higher security level is required */
1086         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1087                 return 1;
1088
1089         /* Accept if secure or higher security level is already present */
1090         if (conn->sec_level == BT_SECURITY_HIGH ||
1091             conn->sec_level == BT_SECURITY_FIPS)
1092                 return 1;
1093
1094         /* Reject a link that is not secure */
1095         return 0;
1096 }
1097 EXPORT_SYMBOL(hci_conn_check_secure);
1098
1099 /* Switch role */
1100 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1101 {
1102         BT_DBG("hcon %p", conn);
1103
1104         if (role == conn->role)
1105                 return 1;
1106
1107         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1108                 struct hci_cp_switch_role cp;
1109                 bacpy(&cp.bdaddr, &conn->dst);
1110                 cp.role = role;
1111                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1112         }
1113
1114         return 0;
1115 }
1116 EXPORT_SYMBOL(hci_conn_switch_role);
1117
1118 /* Enter active mode */
1119 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1120 {
1121         struct hci_dev *hdev = conn->hdev;
1122
1123         BT_DBG("hcon %p mode %d", conn, conn->mode);
1124
1125         if (conn->mode != HCI_CM_SNIFF)
1126                 goto timer;
1127
1128         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1129                 goto timer;
1130
1131         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1132                 struct hci_cp_exit_sniff_mode cp;
1133                 cp.handle = cpu_to_le16(conn->handle);
1134                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1135         }
1136
1137 timer:
1138         if (hdev->idle_timeout > 0)
1139                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1140                                    msecs_to_jiffies(hdev->idle_timeout));
1141 }
1142
1143 /* Drop all connections on the device */
1144 void hci_conn_hash_flush(struct hci_dev *hdev)
1145 {
1146         struct hci_conn_hash *h = &hdev->conn_hash;
1147         struct hci_conn *c, *n;
1148
1149         BT_DBG("hdev %s", hdev->name);
1150
1151         list_for_each_entry_safe(c, n, &h->list, list) {
1152                 c->state = BT_CLOSED;
1153
1154                 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1155                 hci_conn_del(c);
1156         }
1157 }
1158
1159 /* Check pending connect attempts */
1160 void hci_conn_check_pending(struct hci_dev *hdev)
1161 {
1162         struct hci_conn *conn;
1163
1164         BT_DBG("hdev %s", hdev->name);
1165
1166         hci_dev_lock(hdev);
1167
1168         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1169         if (conn)
1170                 hci_acl_create_connection(conn);
1171
1172         hci_dev_unlock(hdev);
1173 }
1174
1175 static u32 get_link_mode(struct hci_conn *conn)
1176 {
1177         u32 link_mode = 0;
1178
1179         if (conn->role == HCI_ROLE_MASTER)
1180                 link_mode |= HCI_LM_MASTER;
1181
1182         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1183                 link_mode |= HCI_LM_ENCRYPT;
1184
1185         if (test_bit(HCI_CONN_AUTH, &conn->flags))
1186                 link_mode |= HCI_LM_AUTH;
1187
1188         if (test_bit(HCI_CONN_SECURE, &conn->flags))
1189                 link_mode |= HCI_LM_SECURE;
1190
1191         if (test_bit(HCI_CONN_FIPS, &conn->flags))
1192                 link_mode |= HCI_LM_FIPS;
1193
1194         return link_mode;
1195 }
1196
1197 int hci_get_conn_list(void __user *arg)
1198 {
1199         struct hci_conn *c;
1200         struct hci_conn_list_req req, *cl;
1201         struct hci_conn_info *ci;
1202         struct hci_dev *hdev;
1203         int n = 0, size, err;
1204
1205         if (copy_from_user(&req, arg, sizeof(req)))
1206                 return -EFAULT;
1207
1208         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1209                 return -EINVAL;
1210
1211         size = sizeof(req) + req.conn_num * sizeof(*ci);
1212
1213         cl = kmalloc(size, GFP_KERNEL);
1214         if (!cl)
1215                 return -ENOMEM;
1216
1217         hdev = hci_dev_get(req.dev_id);
1218         if (!hdev) {
1219                 kfree(cl);
1220                 return -ENODEV;
1221         }
1222
1223         ci = cl->conn_info;
1224
1225         hci_dev_lock(hdev);
1226         list_for_each_entry(c, &hdev->conn_hash.list, list) {
1227                 bacpy(&(ci + n)->bdaddr, &c->dst);
1228                 (ci + n)->handle = c->handle;
1229                 (ci + n)->type  = c->type;
1230                 (ci + n)->out   = c->out;
1231                 (ci + n)->state = c->state;
1232                 (ci + n)->link_mode = get_link_mode(c);
1233                 if (++n >= req.conn_num)
1234                         break;
1235         }
1236         hci_dev_unlock(hdev);
1237
1238         cl->dev_id = hdev->id;
1239         cl->conn_num = n;
1240         size = sizeof(req) + n * sizeof(*ci);
1241
1242         hci_dev_put(hdev);
1243
1244         err = copy_to_user(arg, cl, size);
1245         kfree(cl);
1246
1247         return err ? -EFAULT : 0;
1248 }
1249
1250 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1251 {
1252         struct hci_conn_info_req req;
1253         struct hci_conn_info ci;
1254         struct hci_conn *conn;
1255         char __user *ptr = arg + sizeof(req);
1256
1257         if (copy_from_user(&req, arg, sizeof(req)))
1258                 return -EFAULT;
1259
1260         hci_dev_lock(hdev);
1261         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1262         if (conn) {
1263                 bacpy(&ci.bdaddr, &conn->dst);
1264                 ci.handle = conn->handle;
1265                 ci.type  = conn->type;
1266                 ci.out   = conn->out;
1267                 ci.state = conn->state;
1268                 ci.link_mode = get_link_mode(conn);
1269         }
1270         hci_dev_unlock(hdev);
1271
1272         if (!conn)
1273                 return -ENOENT;
1274
1275         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1276 }
1277
1278 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1279 {
1280         struct hci_auth_info_req req;
1281         struct hci_conn *conn;
1282
1283         if (copy_from_user(&req, arg, sizeof(req)))
1284                 return -EFAULT;
1285
1286         hci_dev_lock(hdev);
1287         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1288         if (conn)
1289                 req.type = conn->auth_type;
1290         hci_dev_unlock(hdev);
1291
1292         if (!conn)
1293                 return -ENOENT;
1294
1295         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1296 }
1297
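/* Allocate an hci_chan bound to the given connection. A reference on
 * the connection is held for the lifetime of the channel; creation is
 * refused once the connection has been marked for dropping
 * (HCI_CONN_DROP).
 */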
1298 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1299 {
1300         struct hci_dev *hdev = conn->hdev;
1301         struct hci_chan *chan;
1302
1303         BT_DBG("%s hcon %p", hdev->name, conn);
1304
1305         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1306                 BT_DBG("Refusing to create new hci_chan");
1307                 return NULL;
1308         }
1309
1310         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1311         if (!chan)
1312                 return NULL;
1313
1314         chan->conn = hci_conn_get(conn);
1315         skb_queue_head_init(&chan->data_q);
1316         chan->state = BT_CONNECTED;
1317
1318         list_add_rcu(&chan->list, &conn->chan_list);
1319
1320         return chan;
1321 }
1322
1323 void hci_chan_del(struct hci_chan *chan)
1324 {
1325         struct hci_conn *conn = chan->conn;
1326         struct hci_dev *hdev = conn->hdev;
1327
1328         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1329
1330         list_del_rcu(&chan->list);
1331
1332         synchronize_rcu();
1333
1334         /* Prevent new hci_chans from being created for this hci_conn */
1335         set_bit(HCI_CONN_DROP, &conn->flags);
1336
1337         hci_conn_put(conn);
1338
1339         skb_queue_purge(&chan->data_q);
1340         kfree(chan);
1341 }
1342
1343 void hci_chan_list_flush(struct hci_conn *conn)
1344 {
1345         struct hci_chan *chan, *n;
1346
1347         BT_DBG("hcon %p", conn);
1348
1349         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1350                 hci_chan_del(chan);
1351 }
1352
1353 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1354                                                  __u16 handle)
1355 {
1356         struct hci_chan *hchan;
1357
1358         list_for_each_entry(hchan, &hcon->chan_list, list) {
1359                 if (hchan->handle == handle)
1360                         return hchan;
1361         }
1362
1363         return NULL;
1364 }
1365
1366 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1367 {
1368         struct hci_conn_hash *h = &hdev->conn_hash;
1369         struct hci_conn *hcon;
1370         struct hci_chan *hchan = NULL;
1371
1372         rcu_read_lock();
1373
1374         list_for_each_entry_rcu(hcon, &h->list, list) {
1375                 hchan = __hci_chan_lookup_handle(hcon, handle);
1376                 if (hchan)
1377                         break;
1378         }
1379
1380         rcu_read_unlock();
1381
1382         return hchan;
1383 }