kernel/net/bluetooth/hci_conn.c (kvmfornfv.git, linux-4.4.6-rt14)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
38 struct sco_param {
39         u16 pkt_type;
40         u16 max_latency;
41         u8  retrans_effort;
42 };
43
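/* Synchronous connection parameter tables. hci_setup_sync() indexes these
 * with conn->attempt - 1, so every retry falls back to the next entry.
 * The S, D and T labels in the comments refer to the standard eSCO/SCO
 * parameter sets for the CVSD and mSBC (transparent) air modes.
 */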
44 static const struct sco_param esco_param_cvsd[] = {
45         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
46         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
47         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
48         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
49         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
50 };
51
52 static const struct sco_param sco_param_cvsd[] = {
53         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
54         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
55 };
56
57 static const struct sco_param esco_param_msbc[] = {
58         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
59         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
60 };
61
62 /* This function requires the caller holds hdev->lock */
63 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
64 {
65         struct hci_conn_params *params;
66         struct hci_dev *hdev = conn->hdev;
67         struct smp_irk *irk;
68         bdaddr_t *bdaddr;
69         u8 bdaddr_type;
70
71         bdaddr = &conn->dst;
72         bdaddr_type = conn->dst_type;
73
74         /* Check if we need to convert to identity address */
75         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
76         if (irk) {
77                 bdaddr = &irk->bdaddr;
78                 bdaddr_type = irk->addr_type;
79         }
80
81         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
82                                            bdaddr_type);
83         if (!params || !params->explicit_connect)
84                 return;
85
86         /* The connection attempt was scanning for a new RPA and is still
87          * in the scan phase. If the params are not associated with any other
88          * autoconnect action, remove them completely. If they are, just
89          * unmark them as waiting for connection by clearing explicit_connect.
90          */
91         params->explicit_connect = false;
92
93         list_del_init(&params->action);
94
95         switch (params->auto_connect) {
96         case HCI_AUTO_CONN_EXPLICIT:
97                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
98                 /* return instead of break to avoid duplicate scan update */
99                 return;
100         case HCI_AUTO_CONN_DIRECT:
101         case HCI_AUTO_CONN_ALWAYS:
102                 list_add(&params->action, &hdev->pend_le_conns);
103                 break;
104         case HCI_AUTO_CONN_REPORT:
105                 list_add(&params->action, &hdev->pend_le_reports);
106                 break;
107         default:
108                 break;
109         }
110
111         hci_update_background_scan(hdev);
112 }
113
114 static void hci_conn_cleanup(struct hci_conn *conn)
115 {
116         struct hci_dev *hdev = conn->hdev;
117
118         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120
121         hci_chan_list_flush(conn);
122
123         hci_conn_hash_del(hdev, conn);
124
125         if (hdev->notify)
126                 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
127
128         hci_conn_del_sysfs(conn);
129
130         debugfs_remove_recursive(conn->debugfs);
131
132         hci_dev_put(hdev);
133
134         hci_conn_put(conn);
135 }
136
137 static void le_scan_cleanup(struct work_struct *work)
138 {
139         struct hci_conn *conn = container_of(work, struct hci_conn,
140                                              le_scan_cleanup);
141         struct hci_dev *hdev = conn->hdev;
142         struct hci_conn *c = NULL;
143
144         BT_DBG("%s hcon %p", hdev->name, conn);
145
146         hci_dev_lock(hdev);
147
148         /* Check that the hci_conn is still around */
149         rcu_read_lock();
150         list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
151                 if (c == conn)
152                         break;
153         }
154         rcu_read_unlock();
155
156         if (c == conn) {
157                 hci_connect_le_scan_cleanup(conn);
158                 hci_conn_cleanup(conn);
159         }
160
161         hci_dev_unlock(hdev);
162         hci_dev_put(hdev);
163         hci_conn_put(conn);
164 }
165
166 static void hci_connect_le_scan_remove(struct hci_conn *conn)
167 {
168         BT_DBG("%s hcon %p", conn->hdev->name, conn);
169
170         /* We can't call hci_conn_del/hci_conn_cleanup here since that
171          * could deadlock with another hci_conn_del() call that's holding
172          * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
173          * Instead, grab temporary extra references to the hci_dev and
174          * hci_conn and perform the necessary cleanup in a separate work
175          * callback.
176          */
177
178         hci_dev_hold(conn->hdev);
179         hci_conn_get(conn);
180
181         schedule_work(&conn->le_scan_cleanup);
182 }
183
184 static void hci_acl_create_connection(struct hci_conn *conn)
185 {
186         struct hci_dev *hdev = conn->hdev;
187         struct inquiry_entry *ie;
188         struct hci_cp_create_conn cp;
189
190         BT_DBG("hcon %p", conn);
191
192         conn->state = BT_CONNECT;
193         conn->out = true;
194         conn->role = HCI_ROLE_MASTER;
195
196         conn->attempt++;
197
198         conn->link_policy = hdev->link_policy;
199
200         memset(&cp, 0, sizeof(cp));
201         bacpy(&cp.bdaddr, &conn->dst);
202         cp.pscan_rep_mode = 0x02;
203
204         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
205         if (ie) {
206                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
207                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
208                         cp.pscan_mode     = ie->data.pscan_mode;
209                         cp.clock_offset   = ie->data.clock_offset |
210                                             cpu_to_le16(0x8000);
211                 }
212
213                 memcpy(conn->dev_class, ie->data.dev_class, 3);
214                 if (ie->data.ssp_mode > 0)
215                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
216         }
217
218         cp.pkt_type = cpu_to_le16(conn->pkt_type);
219         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
220                 cp.role_switch = 0x01;
221         else
222                 cp.role_switch = 0x00;
223
224         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
225 }
226
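/* Request disconnection of a connection. For an established ACL link where
 * we are master, a Read Clock Offset command is issued first (its result is
 * handled in the clock offset event handler) before the disconnect itself
 * is carried out through hci_abort_conn().
 */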
227 int hci_disconnect(struct hci_conn *conn, __u8 reason)
228 {
229         BT_DBG("hcon %p", conn);
230
231         /* When we are master of an established connection and it enters
232          * the disconnect timeout, then go ahead and try to read the
233          * current clock offset.  Processing of the result is done
234          * within the event handling and hci_clock_offset_evt function.
235          */
236         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
237             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
238                 struct hci_dev *hdev = conn->hdev;
239                 struct hci_cp_read_clock_offset clkoff_cp;
240
241                 clkoff_cp.handle = cpu_to_le16(conn->handle);
242                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
243                              &clkoff_cp);
244         }
245
246         return hci_abort_conn(conn, reason);
247 }
248
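/* Set up a legacy SCO link on top of the given ACL handle using
 * HCI_Add_SCO_Connection. Only used when the controller is not eSCO
 * capable; see hci_sco_setup().
 */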
249 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
250 {
251         struct hci_dev *hdev = conn->hdev;
252         struct hci_cp_add_sco cp;
253
254         BT_DBG("hcon %p", conn);
255
256         conn->state = BT_CONNECT;
257         conn->out = true;
258
259         conn->attempt++;
260
261         cp.handle   = cpu_to_le16(handle);
262         cp.pkt_type = cpu_to_le16(conn->pkt_type);
263
264         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
265 }
266
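/* Set up an eSCO/SCO link using HCI_Setup_Synchronous_Connection. The
 * parameter set is picked from the tables above based on the air mode in
 * conn->setting and the current conn->attempt. Returns false once all
 * parameter sets for the requested air mode have been tried or if the
 * command could not be sent.
 */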
267 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
268 {
269         struct hci_dev *hdev = conn->hdev;
270         struct hci_cp_setup_sync_conn cp;
271         const struct sco_param *param;
272
273         BT_DBG("hcon %p", conn);
274
275         conn->state = BT_CONNECT;
276         conn->out = true;
277
278         conn->attempt++;
279
280         cp.handle   = cpu_to_le16(handle);
281
282         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
283         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
284         cp.voice_setting  = cpu_to_le16(conn->setting);
285
286         switch (conn->setting & SCO_AIRMODE_MASK) {
287         case SCO_AIRMODE_TRANSP:
288                 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
289                         return false;
290                 param = &esco_param_msbc[conn->attempt - 1];
291                 break;
292         case SCO_AIRMODE_CVSD:
293                 if (lmp_esco_capable(conn->link)) {
294                         if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
295                                 return false;
296                         param = &esco_param_cvsd[conn->attempt - 1];
297                 } else {
298                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
299                                 return false;
300                         param = &sco_param_cvsd[conn->attempt - 1];
301                 }
302                 break;
303         default:
304                 return false;
305         }
306
307         cp.retrans_effort = param->retrans_effort;
308         cp.pkt_type = __cpu_to_le16(param->pkt_type);
309         cp.max_latency = __cpu_to_le16(param->max_latency);
310
311         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
312                 return false;
313
314         return true;
315 }
316
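/* Send HCI_LE_Connection_Update with the given parameters and mirror them
 * into any stored connection parameters for this peer. Returns 0x01 if
 * stored parameters were updated, 0x00 otherwise.
 */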
317 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
318                       u16 to_multiplier)
319 {
320         struct hci_dev *hdev = conn->hdev;
321         struct hci_conn_params *params;
322         struct hci_cp_le_conn_update cp;
323
324         hci_dev_lock(hdev);
325
326         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
327         if (params) {
328                 params->conn_min_interval = min;
329                 params->conn_max_interval = max;
330                 params->conn_latency = latency;
331                 params->supervision_timeout = to_multiplier;
332         }
333
334         hci_dev_unlock(hdev);
335
336         memset(&cp, 0, sizeof(cp));
337         cp.handle               = cpu_to_le16(conn->handle);
338         cp.conn_interval_min    = cpu_to_le16(min);
339         cp.conn_interval_max    = cpu_to_le16(max);
340         cp.conn_latency         = cpu_to_le16(latency);
341         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
342         cp.min_ce_len           = cpu_to_le16(0x0000);
343         cp.max_ce_len           = cpu_to_le16(0x0000);
344
345         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
346
347         if (params)
348                 return 0x01;
349
350         return 0x00;
351 }
352
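/* Start encryption on an LE link using the given long term key (LTK),
 * EDIV and Rand values.
 */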
353 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
354                       __u8 ltk[16], __u8 key_size)
355 {
356         struct hci_dev *hdev = conn->hdev;
357         struct hci_cp_le_start_enc cp;
358
359         BT_DBG("hcon %p", conn);
360
361         memset(&cp, 0, sizeof(cp));
362
363         cp.handle = cpu_to_le16(conn->handle);
364         cp.rand = rand;
365         cp.ediv = ediv;
366         memcpy(cp.ltk, ltk, key_size);
367
368         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
369 }
370
371 /* Device _must_ be locked */
372 void hci_sco_setup(struct hci_conn *conn, __u8 status)
373 {
374         struct hci_conn *sco = conn->link;
375
376         if (!sco)
377                 return;
378
379         BT_DBG("hcon %p", conn);
380
381         if (!status) {
382                 if (lmp_esco_capable(conn->hdev))
383                         hci_setup_sync(sco, conn->handle);
384                 else
385                         hci_add_sco(sco, conn->handle);
386         } else {
387                 hci_connect_cfm(sco, status);
388                 hci_conn_del(sco);
389         }
390 }
391
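/* Deferred disconnect work, run once the connection's users have dropped
 * it. If references remain, nothing is done; LE connections still in the
 * scanning phase get their scan state cleaned up, everything else is
 * aborted.
 */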
392 static void hci_conn_timeout(struct work_struct *work)
393 {
394         struct hci_conn *conn = container_of(work, struct hci_conn,
395                                              disc_work.work);
396         int refcnt = atomic_read(&conn->refcnt);
397
398         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
399
400         WARN_ON(refcnt < 0);
401
402         /* FIXME: It was observed that in a failed pairing scenario the refcnt
403          * drops below 0. Probably this is because l2cap_conn_del calls
404          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
405          * dropped. After that loop hci_chan_del is called which also drops
406          * conn. For now make sure that the ACL is alive if refcnt is higher than 0,
407          * otherwise drop it.
408          */
409         if (refcnt > 0)
410                 return;
411
412         /* LE connections in scanning state need special handling */
413         if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
414             test_bit(HCI_CONN_SCANNING, &conn->flags)) {
415                 hci_connect_le_scan_remove(conn);
416                 return;
417         }
418
419         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
420 }
421
422 /* Enter sniff mode */
423 static void hci_conn_idle(struct work_struct *work)
424 {
425         struct hci_conn *conn = container_of(work, struct hci_conn,
426                                              idle_work.work);
427         struct hci_dev *hdev = conn->hdev;
428
429         BT_DBG("hcon %p mode %d", conn, conn->mode);
430
431         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
432                 return;
433
434         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
435                 return;
436
437         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
438                 struct hci_cp_sniff_subrate cp;
439                 cp.handle             = cpu_to_le16(conn->handle);
440                 cp.max_latency        = cpu_to_le16(0);
441                 cp.min_remote_timeout = cpu_to_le16(0);
442                 cp.min_local_timeout  = cpu_to_le16(0);
443                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
444         }
445
446         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
447                 struct hci_cp_sniff_mode cp;
448                 cp.handle       = cpu_to_le16(conn->handle);
449                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
450                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
451                 cp.attempt      = cpu_to_le16(4);
452                 cp.timeout      = cpu_to_le16(1);
453                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
454         }
455 }
456
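/* Deferred auto-accept: reply to the pending User Confirmation Request
 * for this peer.
 */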
457 static void hci_conn_auto_accept(struct work_struct *work)
458 {
459         struct hci_conn *conn = container_of(work, struct hci_conn,
460                                              auto_accept_work.work);
461
462         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
463                      &conn->dst);
464 }
465
466 static void le_conn_timeout(struct work_struct *work)
467 {
468         struct hci_conn *conn = container_of(work, struct hci_conn,
469                                              le_conn_timeout.work);
470         struct hci_dev *hdev = conn->hdev;
471
472         BT_DBG("");
473
474         /* We could end up here due to having done directed advertising,
475          * so clean up the state if necessary. This should however only
476          * happen with broken hardware or if low duty cycle was used
477          * (which doesn't have a timeout of its own).
478          */
479         if (conn->role == HCI_ROLE_SLAVE) {
480                 u8 enable = 0x00;
481                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
482                              &enable);
483                 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
484                 return;
485         }
486
487         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
488 }
489
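/* Allocate and initialise a new hci_conn of the given link type, add it to
 * the connection hash and register its sysfs entry. Returns NULL if the
 * allocation fails.
 */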
490 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
491                               u8 role)
492 {
493         struct hci_conn *conn;
494
495         BT_DBG("%s dst %pMR", hdev->name, dst);
496
497         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
498         if (!conn)
499                 return NULL;
500
501         bacpy(&conn->dst, dst);
502         bacpy(&conn->src, &hdev->bdaddr);
503         conn->hdev  = hdev;
504         conn->type  = type;
505         conn->role  = role;
506         conn->mode  = HCI_CM_ACTIVE;
507         conn->state = BT_OPEN;
508         conn->auth_type = HCI_AT_GENERAL_BONDING;
509         conn->io_capability = hdev->io_capability;
510         conn->remote_auth = 0xff;
511         conn->key_type = 0xff;
512         conn->rssi = HCI_RSSI_INVALID;
513         conn->tx_power = HCI_TX_POWER_INVALID;
514         conn->max_tx_power = HCI_TX_POWER_INVALID;
515
516         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
517         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
518
519         if (conn->role == HCI_ROLE_MASTER)
520                 conn->out = true;
521
522         switch (type) {
523         case ACL_LINK:
524                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
525                 break;
526         case LE_LINK:
527                 /* conn->src should reflect the local identity address */
528                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
529                 break;
530         case SCO_LINK:
531                 if (lmp_esco_capable(hdev))
532                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
533                                         (hdev->esco_type & EDR_ESCO_MASK);
534                 else
535                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
536                 break;
537         case ESCO_LINK:
538                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
539                 break;
540         }
541
542         skb_queue_head_init(&conn->data_q);
543
544         INIT_LIST_HEAD(&conn->chan_list);
545
546         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
547         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
548         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
549         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
550         INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
551
552         atomic_set(&conn->refcnt, 0);
553
554         hci_dev_hold(hdev);
555
556         hci_conn_hash_add(hdev, conn);
557         if (hdev->notify)
558                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
559
560         hci_conn_init_sysfs(conn);
561
562         return conn;
563 }
564
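/* Tear down a connection: cancel its pending work, give unacked packet
 * credits back to the controller counters, unlink an attached SCO/ACL
 * connection and release the remaining state through hci_conn_cleanup().
 */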
565 int hci_conn_del(struct hci_conn *conn)
566 {
567         struct hci_dev *hdev = conn->hdev;
568
569         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
570
571         cancel_delayed_work_sync(&conn->disc_work);
572         cancel_delayed_work_sync(&conn->auto_accept_work);
573         cancel_delayed_work_sync(&conn->idle_work);
574
575         if (conn->type == ACL_LINK) {
576                 struct hci_conn *sco = conn->link;
577                 if (sco)
578                         sco->link = NULL;
579
580                 /* Unacked frames */
581                 hdev->acl_cnt += conn->sent;
582         } else if (conn->type == LE_LINK) {
583                 cancel_delayed_work(&conn->le_conn_timeout);
584
585                 if (hdev->le_pkts)
586                         hdev->le_cnt += conn->sent;
587                 else
588                         hdev->acl_cnt += conn->sent;
589         } else {
590                 struct hci_conn *acl = conn->link;
591                 if (acl) {
592                         acl->link = NULL;
593                         hci_conn_drop(acl);
594                 }
595         }
596
597         if (conn->amp_mgr)
598                 amp_mgr_put(conn->amp_mgr);
599
600         skb_queue_purge(&conn->data_q);
601
602         /* Remove the connection from the list and cleanup its remaining
603          * state. This is a separate function since for some cases like
604          * BT_CONNECT_SCAN we *only* want the cleanup part without the
605          * rest of hci_conn_del.
606          */
607         hci_conn_cleanup(conn);
608
609         return 0;
610 }
611
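/* Select the BR/EDR controller to use for a connection from src to dst.
 * A specific source address must match the controller address exactly;
 * otherwise the first usable controller whose address differs from dst is
 * picked. The returned hci_dev has its reference count raised.
 */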
612 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
613 {
614         int use_src = bacmp(src, BDADDR_ANY);
615         struct hci_dev *hdev = NULL, *d;
616
617         BT_DBG("%pMR -> %pMR", src, dst);
618
619         read_lock(&hci_dev_list_lock);
620
621         list_for_each_entry(d, &hci_dev_list, list) {
622                 if (!test_bit(HCI_UP, &d->flags) ||
623                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
624                     d->dev_type != HCI_BREDR)
625                         continue;
626
627                 /* Simple routing:
628                  *   No source address - find interface with bdaddr != dst
629                  *   Source address    - find interface with bdaddr == src
630                  */
631
632                 if (use_src) {
633                         if (!bacmp(&d->bdaddr, src)) {
634                                 hdev = d; break;
635                         }
636                 } else {
637                         if (bacmp(&d->bdaddr, dst)) {
638                                 hdev = d; break;
639                         }
640                 }
641         }
642
643         if (hdev)
644                 hdev = hci_dev_hold(hdev);
645
646         read_unlock(&hci_dev_list_lock);
647         return hdev;
648 }
649 EXPORT_SYMBOL(hci_get_route);
650
651 /* This function requires the caller holds hdev->lock */
652 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
653 {
654         struct hci_dev *hdev = conn->hdev;
655         struct hci_conn_params *params;
656
657         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
658                                            conn->dst_type);
659         if (params && params->conn) {
660                 hci_conn_drop(params->conn);
661                 hci_conn_put(params->conn);
662                 params->conn = NULL;
663         }
664
665         conn->state = BT_CLOSED;
666
667         mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
668                             status);
669
670         hci_connect_cfm(conn, status);
671
672         hci_conn_del(conn);
673
674         /* Since we may have temporarily stopped the background scanning in
675          * favor of connection establishment, we should restart it.
676          */
677         hci_update_background_scan(hdev);
678
679         /* Re-enable advertising in case this was a failed connection
680          * attempt as a peripheral.
681          */
682         mgmt_reenable_advertising(hdev);
683 }
684
685 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
686 {
687         struct hci_conn *conn;
688
689         hci_dev_lock(hdev);
690
691         conn = hci_lookup_le_connect(hdev);
692
693         if (!status) {
694                 hci_connect_le_scan_cleanup(conn);
695                 goto done;
696         }
697
698         BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
699                status);
700
701         if (!conn)
702                 goto done;
703
704         hci_le_conn_failed(conn, status);
705
706 done:
707         hci_dev_unlock(hdev);
708 }
709
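/* Append an HCI_LE_Create_Connection command for conn to the request,
 * scanning continuously (window equal to interval) and using the LE
 * connection parameters already stored in conn.
 */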
710 static void hci_req_add_le_create_conn(struct hci_request *req,
711                                        struct hci_conn *conn)
712 {
713         struct hci_cp_le_create_conn cp;
714         struct hci_dev *hdev = conn->hdev;
715         u8 own_addr_type;
716
717         memset(&cp, 0, sizeof(cp));
718
719         /* Update random address, but set require_privacy to false so
720          * that we never connect with a non-resolvable address.
721          */
722         if (hci_update_random_address(req, false, &own_addr_type))
723                 return;
724
725         /* Set window to be the same value as the interval to enable
726          * continuous scanning.
727          */
728         cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
729         cp.scan_window = cp.scan_interval;
730
731         bacpy(&cp.peer_addr, &conn->dst);
732         cp.peer_addr_type = conn->dst_type;
733         cp.own_address_type = own_addr_type;
734         cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
735         cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
736         cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
737         cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
738         cp.min_ce_len = cpu_to_le16(0x0000);
739         cp.max_ce_len = cpu_to_le16(0x0000);
740
741         hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
742
743         conn->state = BT_CONNECT;
744         clear_bit(HCI_CONN_SCANNING, &conn->flags);
745 }
746
747 static void hci_req_directed_advertising(struct hci_request *req,
748                                          struct hci_conn *conn)
749 {
750         struct hci_dev *hdev = req->hdev;
751         struct hci_cp_le_set_adv_param cp;
752         u8 own_addr_type;
753         u8 enable;
754
755         /* Clear the HCI_LE_ADV bit temporarily so that the
756          * hci_update_random_address knows that it's safe to go ahead
757          * and write a new random address. The flag will be set back on
758          * as soon as the SET_ADV_ENABLE HCI command completes.
759          */
760         hci_dev_clear_flag(hdev, HCI_LE_ADV);
761
762         /* Set require_privacy to false so that the remote device has a
763          * chance of identifying us.
764          */
765         if (hci_update_random_address(req, false, &own_addr_type) < 0)
766                 return;
767
768         memset(&cp, 0, sizeof(cp));
769         cp.type = LE_ADV_DIRECT_IND;
770         cp.own_address_type = own_addr_type;
771         cp.direct_addr_type = conn->dst_type;
772         bacpy(&cp.direct_addr, &conn->dst);
773         cp.channel_map = hdev->le_adv_channel_map;
774
775         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
776
777         enable = 0x01;
778         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
779
780         conn->state = BT_CONNECT;
781 }
782
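/* Initiate an outgoing LE connection, or take over an unfinished attempt
 * started by hci_connect_le_scan(). Only one LE connection attempt may run
 * at a time; advertising and scanning are stopped as needed before
 * HCI_LE_Create_Connection is issued, or directed advertising is used when
 * connecting in the slave role. The returned connection is held unless it
 * continues an unfinished attempt, which is already held.
 */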
783 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
784                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
785                                 u8 role)
786 {
787         struct hci_conn_params *params;
788         struct hci_conn *conn, *conn_unfinished;
789         struct smp_irk *irk;
790         struct hci_request req;
791         int err;
792
793         /* Let's make sure that LE is enabled. */
794         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
795                 if (lmp_le_capable(hdev))
796                         return ERR_PTR(-ECONNREFUSED);
797
798                 return ERR_PTR(-EOPNOTSUPP);
799         }
800
801         /* Some devices send ATT messages as soon as the physical link is
802          * established. To be able to handle these ATT messages, the user-
803          * space first establishes the connection and then starts the pairing
804          * process.
805          *
806          * So if a hci_conn object already exists for the following connection
807          * attempt, we simply update pending_sec_level and auth_type fields
808          * and return the object found.
809          */
810         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
811         conn_unfinished = NULL;
812         if (conn) {
813                 if (conn->state == BT_CONNECT &&
814                     test_bit(HCI_CONN_SCANNING, &conn->flags)) {
815                         BT_DBG("will continue unfinished conn %pMR", dst);
816                         conn_unfinished = conn;
817                 } else {
818                         if (conn->pending_sec_level < sec_level)
819                                 conn->pending_sec_level = sec_level;
820                         goto done;
821                 }
822         }
823
824         /* Since the controller supports only one LE connection attempt at a
825          * time, we return -EBUSY if there is any connection attempt running.
826          */
827         if (hci_lookup_le_connect(hdev))
828                 return ERR_PTR(-EBUSY);
829
830         /* When given an identity address with existing identity
831          * resolving key, the connection needs to be established
832          * to a resolvable random address.
833          *
834          * Storing the resolvable random address is required here
835          * to handle connection failures. The address will later
836          * be resolved back into the original identity address
837          * from the connect request.
838          */
839         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
840         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
841                 dst = &irk->rpa;
842                 dst_type = ADDR_LE_DEV_RANDOM;
843         }
844
845         if (conn_unfinished) {
846                 conn = conn_unfinished;
847                 bacpy(&conn->dst, dst);
848         } else {
849                 conn = hci_conn_add(hdev, LE_LINK, dst, role);
850         }
851
852         if (!conn)
853                 return ERR_PTR(-ENOMEM);
854
855         conn->dst_type = dst_type;
856         conn->sec_level = BT_SECURITY_LOW;
857         conn->conn_timeout = conn_timeout;
858
859         if (!conn_unfinished)
860                 conn->pending_sec_level = sec_level;
861
862         hci_req_init(&req, hdev);
863
864         /* Disable advertising if we're active. For master role
865          * connections most controllers will refuse to connect if
866          * advertising is enabled, and for slave role connections we
867          * anyway have to disable it in order to start directed
868          * advertising.
869          */
870         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
871                 u8 enable = 0x00;
872                 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
873                             &enable);
874         }
875
876         /* If requested to connect as slave use directed advertising */
877         if (conn->role == HCI_ROLE_SLAVE) {
878                 /* If we're active scanning most controllers are unable
879                  * to initiate advertising. Simply reject the attempt.
880                  */
881                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
882                     hdev->le_scan_type == LE_SCAN_ACTIVE) {
883                         skb_queue_purge(&req.cmd_q);
884                         hci_conn_del(conn);
885                         return ERR_PTR(-EBUSY);
886                 }
887
888                 hci_req_directed_advertising(&req, conn);
889                 goto create_conn;
890         }
891
892         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
893         if (params) {
894                 conn->le_conn_min_interval = params->conn_min_interval;
895                 conn->le_conn_max_interval = params->conn_max_interval;
896                 conn->le_conn_latency = params->conn_latency;
897                 conn->le_supv_timeout = params->supervision_timeout;
898         } else {
899                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
900                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
901                 conn->le_conn_latency = hdev->le_conn_latency;
902                 conn->le_supv_timeout = hdev->le_supv_timeout;
903         }
904
905         /* If controller is scanning, we stop it since some controllers are
906          * not able to scan and connect at the same time. Also set the
907          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
908          * handler for scan disabling knows to set the correct discovery
909          * state.
910          */
911         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
912                 hci_req_add_le_scan_disable(&req);
913                 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
914         }
915
916         hci_req_add_le_create_conn(&req, conn);
917
918 create_conn:
919         err = hci_req_run(&req, create_le_conn_complete);
920         if (err) {
921                 hci_conn_del(conn);
922                 return ERR_PTR(err);
923         }
924
925 done:
926         /* If this is a continuation of a connect started by hci_connect_le_scan,
927          * it has already called hci_conn_hold and calling it again would mess
928          * up the counter.
929          */
930         if (!conn_unfinished)
931                 hci_conn_hold(conn);
932
933         return conn;
934 }
935
936 static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
937                                          u16 opcode)
938 {
939         struct hci_conn *conn;
940
941         if (!status)
942                 return;
943
944         BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
945                status);
946
947         hci_dev_lock(hdev);
948
949         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
950         if (conn)
951                 hci_le_conn_failed(conn, status);
952
953         hci_dev_unlock(hdev);
954 }
955
956 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
957 {
958         struct hci_conn *conn;
959
960         conn = hci_conn_hash_lookup_le(hdev, addr, type);
961         if (!conn)
962                 return false;
963
964         if (conn->state != BT_CONNECTED)
965                 return false;
966
967         return true;
968 }
969
970 /* This function requires the caller holds hdev->lock */
971 static int hci_explicit_conn_params_set(struct hci_request *req,
972                                         bdaddr_t *addr, u8 addr_type)
973 {
974         struct hci_dev *hdev = req->hdev;
975         struct hci_conn_params *params;
976
977         if (is_connected(hdev, addr, addr_type))
978                 return -EISCONN;
979
980         params = hci_conn_params_lookup(hdev, addr, addr_type);
981         if (!params) {
982                 params = hci_conn_params_add(hdev, addr, addr_type);
983                 if (!params)
984                         return -ENOMEM;
985
986                 /* If we created new params, mark them to be deleted in
987                  * hci_connect_le_scan_cleanup. This is a different case from
988                  * existing disabled params; those will stay after cleanup.
989                  */
990                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
991         }
992
993         /* We're trying to connect, so make sure params are at pend_le_conns */
994         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
995             params->auto_connect == HCI_AUTO_CONN_REPORT ||
996             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
997                 list_del_init(&params->action);
998                 list_add(&params->action, &hdev->pend_le_conns);
999         }
1000
1001         params->explicit_connect = true;
1002         __hci_update_background_scan(req);
1003
1004         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1005                params->auto_connect);
1006
1007         return 0;
1008 }
1009
1010 /* This function requires the caller holds hdev->lock */
1011 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1012                                      u8 dst_type, u8 sec_level,
1013                                      u16 conn_timeout, u8 role)
1014 {
1015         struct hci_conn *conn;
1016         struct hci_request req;
1017         int err;
1018
1019         /* Let's make sure that LE is enabled. */
1020         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1021                 if (lmp_le_capable(hdev))
1022                         return ERR_PTR(-ECONNREFUSED);
1023
1024                 return ERR_PTR(-EOPNOTSUPP);
1025         }
1026
1027         /* Some devices send ATT messages as soon as the physical link is
1028          * established. To be able to handle these ATT messages, the user-
1029          * space first establishes the connection and then starts the pairing
1030          * process.
1031          *
1032          * So if a hci_conn object already exists for the following connection
1033          * attempt, we simply update pending_sec_level and auth_type fields
1034          * and return the object found.
1035          */
1036         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1037         if (conn) {
1038                 if (conn->pending_sec_level < sec_level)
1039                         conn->pending_sec_level = sec_level;
1040                 goto done;
1041         }
1042
1043         BT_DBG("requesting refresh of dst_addr");
1044
1045         conn = hci_conn_add(hdev, LE_LINK, dst, role);
1046         if (!conn)
1047                 return ERR_PTR(-ENOMEM);
1048
1049         hci_req_init(&req, hdev);
1050
1051         if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0) {
1052                 /* Drop the connection we just added to avoid leaking it */
                     hci_conn_del(conn);
                     return ERR_PTR(-EBUSY);
             }
1053
1054         conn->state = BT_CONNECT;
1055         set_bit(HCI_CONN_SCANNING, &conn->flags);
1056
1057         err = hci_req_run(&req, hci_connect_le_scan_complete);
1058         if (err && err != -ENODATA) {
1059                 hci_conn_del(conn);
1060                 return ERR_PTR(err);
1061         }
1062
1063         conn->dst_type = dst_type;
1064         conn->sec_level = BT_SECURITY_LOW;
1065         conn->pending_sec_level = sec_level;
1066         conn->conn_timeout = conn_timeout;
1067
1068 done:
1069         hci_conn_hold(conn);
1070         return conn;
1071 }
1072
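/* Look up or create an ACL connection to dst and, unless it is already
 * being set up, start HCI_Create_Connection with the requested security
 * level and authentication type. The returned connection is held.
 */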
1073 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1074                                  u8 sec_level, u8 auth_type)
1075 {
1076         struct hci_conn *acl;
1077
1078         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1079                 if (lmp_bredr_capable(hdev))
1080                         return ERR_PTR(-ECONNREFUSED);
1081
1082                 return ERR_PTR(-EOPNOTSUPP);
1083         }
1084
1085         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1086         if (!acl) {
1087                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1088                 if (!acl)
1089                         return ERR_PTR(-ENOMEM);
1090         }
1091
1092         hci_conn_hold(acl);
1093
1094         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1095                 acl->sec_level = BT_SECURITY_LOW;
1096                 acl->pending_sec_level = sec_level;
1097                 acl->auth_type = auth_type;
1098                 hci_acl_create_connection(acl);
1099         }
1100
1101         return acl;
1102 }
1103
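/* Set up a SCO/eSCO connection to dst on top of an ACL link, creating the
 * ACL first if needed. If the ACL is already connected, SCO setup starts
 * immediately (or is deferred while a mode change is pending); otherwise
 * it is triggered through hci_sco_setup() once the ACL setup completes.
 */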
1104 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1105                                  __u16 setting)
1106 {
1107         struct hci_conn *acl;
1108         struct hci_conn *sco;
1109
1110         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
1111         if (IS_ERR(acl))
1112                 return acl;
1113
1114         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1115         if (!sco) {
1116                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1117                 if (!sco) {
1118                         hci_conn_drop(acl);
1119                         return ERR_PTR(-ENOMEM);
1120                 }
1121         }
1122
1123         acl->link = sco;
1124         sco->link = acl;
1125
1126         hci_conn_hold(sco);
1127
1128         sco->setting = setting;
1129
1130         if (acl->state == BT_CONNECTED &&
1131             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1132                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1133                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1134
1135                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1136                         /* defer SCO setup until mode change completed */
1137                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1138                         return sco;
1139                 }
1140
1141                 hci_sco_setup(acl, 0x00);
1142         }
1143
1144         return sco;
1145 }
1146
1147 /* Check link security requirement */
1148 int hci_conn_check_link_mode(struct hci_conn *conn)
1149 {
1150         BT_DBG("hcon %p", conn);
1151
1152         /* In Secure Connections Only mode, it is required that Secure
1153          * Connections is used and the link is encrypted with AES-CCM
1154          * using a P-256 authenticated combination key.
1155          */
1156         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1157                 if (!hci_conn_sc_enabled(conn) ||
1158                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1159                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1160                         return 0;
1161         }
1162
1163         if (hci_conn_ssp_enabled(conn) &&
1164             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1165                 return 0;
1166
1167         return 1;
1168 }
1169
1170 /* Authenticate remote device */
1171 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1172 {
1173         BT_DBG("hcon %p", conn);
1174
1175         if (conn->pending_sec_level > sec_level)
1176                 sec_level = conn->pending_sec_level;
1177
1178         if (sec_level > conn->sec_level)
1179                 conn->pending_sec_level = sec_level;
1180         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1181                 return 1;
1182
1183         /* Make sure we preserve an existing MITM requirement */
1184         auth_type |= (conn->auth_type & 0x01);
1185
1186         conn->auth_type = auth_type;
1187
1188         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1189                 struct hci_cp_auth_requested cp;
1190
1191                 cp.handle = cpu_to_le16(conn->handle);
1192                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1193                              sizeof(cp), &cp);
1194
1195                 /* If we're already encrypted set the REAUTH_PEND flag,
1196                  * otherwise set the ENCRYPT_PEND.
1197                  */
1198                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1199                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1200                 else
1201                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1202         }
1203
1204         return 0;
1205 }
1206
1207 /* Encrypt the link */
1208 static void hci_conn_encrypt(struct hci_conn *conn)
1209 {
1210         BT_DBG("hcon %p", conn);
1211
1212         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1213                 struct hci_cp_set_conn_encrypt cp;
1214                 cp.handle  = cpu_to_le16(conn->handle);
1215                 cp.encrypt = 0x01;
1216                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1217                              &cp);
1218         }
1219 }
1220
1221 /* Enable security */
1222 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1223                       bool initiator)
1224 {
1225         BT_DBG("hcon %p", conn);
1226
1227         if (conn->type == LE_LINK)
1228                 return smp_conn_security(conn, sec_level);
1229
1230         /* For SDP we don't need the link key. */
1231         if (sec_level == BT_SECURITY_SDP)
1232                 return 1;
1233
1234         /* For non-2.1 devices and low security level we don't need the link
1235            key. */
1236         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1237                 return 1;
1238
1239         /* For other security levels we need the link key. */
1240         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1241                 goto auth;
1242
1243         /* An authenticated FIPS approved combination key has sufficient
1244          * security for security level 4. */
1245         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1246             sec_level == BT_SECURITY_FIPS)
1247                 goto encrypt;
1248
1249         /* An authenticated combination key has sufficient security for
1250            security level 3. */
1251         if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1252              conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1253             sec_level == BT_SECURITY_HIGH)
1254                 goto encrypt;
1255
1256         /* An unauthenticated combination key has sufficient security for
1257            security level 1 and 2. */
1258         if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1259              conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1260             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1261                 goto encrypt;
1262
1263         /* A combination key always has sufficient security for security
1264            levels 1 or 2. High security level requires that the combination
1265            key is generated using the maximum PIN code length (16).
1266            For pre 2.1 units. */
1267         if (conn->key_type == HCI_LK_COMBINATION &&
1268             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1269              conn->pin_length == 16))
1270                 goto encrypt;
1271
1272 auth:
1273         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1274                 return 0;
1275
1276         if (initiator)
1277                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1278
1279         if (!hci_conn_auth(conn, sec_level, auth_type))
1280                 return 0;
1281
1282 encrypt:
1283         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1284                 return 1;
1285
1286         hci_conn_encrypt(conn);
1287         return 0;
1288 }
1289 EXPORT_SYMBOL(hci_conn_security);
1290
1291 /* Check secure link requirement */
1292 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1293 {
1294         BT_DBG("hcon %p", conn);
1295
1296         /* Accept if non-secure or higher security level is required */
1297         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1298                 return 1;
1299
1300         /* Accept if secure or higher security level is already present */
1301         if (conn->sec_level == BT_SECURITY_HIGH ||
1302             conn->sec_level == BT_SECURITY_FIPS)
1303                 return 1;
1304
1305         /* Reject non-secure link */
1306         return 0;
1307 }
1308 EXPORT_SYMBOL(hci_conn_check_secure);
1309
1310 /* Switch role */
1311 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1312 {
1313         BT_DBG("hcon %p", conn);
1314
1315         if (role == conn->role)
1316                 return 1;
1317
1318         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1319                 struct hci_cp_switch_role cp;
1320                 bacpy(&cp.bdaddr, &conn->dst);
1321                 cp.role = role;
1322                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1323         }
1324
1325         return 0;
1326 }
1327 EXPORT_SYMBOL(hci_conn_switch_role);
1328
1329 /* Enter active mode */
1330 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1331 {
1332         struct hci_dev *hdev = conn->hdev;
1333
1334         BT_DBG("hcon %p mode %d", conn, conn->mode);
1335
1336         if (conn->mode != HCI_CM_SNIFF)
1337                 goto timer;
1338
1339         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1340                 goto timer;
1341
1342         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1343                 struct hci_cp_exit_sniff_mode cp;
1344                 cp.handle = cpu_to_le16(conn->handle);
1345                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1346         }
1347
1348 timer:
1349         if (hdev->idle_timeout > 0)
1350                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1351                                    msecs_to_jiffies(hdev->idle_timeout));
1352 }
1353
1354 /* Drop all connections on the device */
1355 void hci_conn_hash_flush(struct hci_dev *hdev)
1356 {
1357         struct hci_conn_hash *h = &hdev->conn_hash;
1358         struct hci_conn *c, *n;
1359
1360         BT_DBG("hdev %s", hdev->name);
1361
1362         list_for_each_entry_safe(c, n, &h->list, list) {
1363                 c->state = BT_CLOSED;
1364
1365                 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1366                 hci_conn_del(c);
1367         }
1368 }
1369
1370 /* Check pending connect attempts */
1371 void hci_conn_check_pending(struct hci_dev *hdev)
1372 {
1373         struct hci_conn *conn;
1374
1375         BT_DBG("hdev %s", hdev->name);
1376
1377         hci_dev_lock(hdev);
1378
1379         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1380         if (conn)
1381                 hci_acl_create_connection(conn);
1382
1383         hci_dev_unlock(hdev);
1384 }
1385
1386 static u32 get_link_mode(struct hci_conn *conn)
1387 {
1388         u32 link_mode = 0;
1389
1390         if (conn->role == HCI_ROLE_MASTER)
1391                 link_mode |= HCI_LM_MASTER;
1392
1393         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1394                 link_mode |= HCI_LM_ENCRYPT;
1395
1396         if (test_bit(HCI_CONN_AUTH, &conn->flags))
1397                 link_mode |= HCI_LM_AUTH;
1398
1399         if (test_bit(HCI_CONN_SECURE, &conn->flags))
1400                 link_mode |= HCI_LM_SECURE;
1401
1402         if (test_bit(HCI_CONN_FIPS, &conn->flags))
1403                 link_mode |= HCI_LM_FIPS;
1404
1405         return link_mode;
1406 }
1407
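/* ioctl helper: copy the list of active connections on the requested
 * device to userspace, limited to req.conn_num entries.
 */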
1408 int hci_get_conn_list(void __user *arg)
1409 {
1410         struct hci_conn *c;
1411         struct hci_conn_list_req req, *cl;
1412         struct hci_conn_info *ci;
1413         struct hci_dev *hdev;
1414         int n = 0, size, err;
1415
1416         if (copy_from_user(&req, arg, sizeof(req)))
1417                 return -EFAULT;
1418
1419         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1420                 return -EINVAL;
1421
1422         size = sizeof(req) + req.conn_num * sizeof(*ci);
1423
1424         cl = kmalloc(size, GFP_KERNEL);
1425         if (!cl)
1426                 return -ENOMEM;
1427
1428         hdev = hci_dev_get(req.dev_id);
1429         if (!hdev) {
1430                 kfree(cl);
1431                 return -ENODEV;
1432         }
1433
1434         ci = cl->conn_info;
1435
1436         hci_dev_lock(hdev);
1437         list_for_each_entry(c, &hdev->conn_hash.list, list) {
1438                 bacpy(&(ci + n)->bdaddr, &c->dst);
1439                 (ci + n)->handle = c->handle;
1440                 (ci + n)->type  = c->type;
1441                 (ci + n)->out   = c->out;
1442                 (ci + n)->state = c->state;
1443                 (ci + n)->link_mode = get_link_mode(c);
1444                 if (++n >= req.conn_num)
1445                         break;
1446         }
1447         hci_dev_unlock(hdev);
1448
1449         cl->dev_id = hdev->id;
1450         cl->conn_num = n;
1451         size = sizeof(req) + n * sizeof(*ci);
1452
1453         hci_dev_put(hdev);
1454
1455         err = copy_to_user(arg, cl, size);
1456         kfree(cl);
1457
1458         return err ? -EFAULT : 0;
1459 }
1460
1461 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1462 {
1463         struct hci_conn_info_req req;
1464         struct hci_conn_info ci;
1465         struct hci_conn *conn;
1466         char __user *ptr = arg + sizeof(req);
1467
1468         if (copy_from_user(&req, arg, sizeof(req)))
1469                 return -EFAULT;
1470
1471         hci_dev_lock(hdev);
1472         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1473         if (conn) {
1474                 bacpy(&ci.bdaddr, &conn->dst);
1475                 ci.handle = conn->handle;
1476                 ci.type  = conn->type;
1477                 ci.out   = conn->out;
1478                 ci.state = conn->state;
1479                 ci.link_mode = get_link_mode(conn);
1480         }
1481         hci_dev_unlock(hdev);
1482
1483         if (!conn)
1484                 return -ENOENT;
1485
1486         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1487 }
1488
1489 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1490 {
1491         struct hci_auth_info_req req;
1492         struct hci_conn *conn;
1493
1494         if (copy_from_user(&req, arg, sizeof(req)))
1495                 return -EFAULT;
1496
1497         hci_dev_lock(hdev);
1498         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1499         if (conn)
1500                 req.type = conn->auth_type;
1501         hci_dev_unlock(hdev);
1502
1503         if (!conn)
1504                 return -ENOENT;
1505
1506         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1507 }
1508
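/* Allocate a new hci_chan on this connection, taking a reference on the
 * connection for it. Fails if the connection is being torn down
 * (HCI_CONN_DROP is set).
 */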
1509 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1510 {
1511         struct hci_dev *hdev = conn->hdev;
1512         struct hci_chan *chan;
1513
1514         BT_DBG("%s hcon %p", hdev->name, conn);
1515
1516         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1517                 BT_DBG("Refusing to create new hci_chan");
1518                 return NULL;
1519         }
1520
1521         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1522         if (!chan)
1523                 return NULL;
1524
1525         chan->conn = hci_conn_get(conn);
1526         skb_queue_head_init(&chan->data_q);
1527         chan->state = BT_CONNECTED;
1528
1529         list_add_rcu(&chan->list, &conn->chan_list);
1530
1531         return chan;
1532 }
1533
1534 void hci_chan_del(struct hci_chan *chan)
1535 {
1536         struct hci_conn *conn = chan->conn;
1537         struct hci_dev *hdev = conn->hdev;
1538
1539         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1540
1541         list_del_rcu(&chan->list);
1542
1543         synchronize_rcu();
1544
1545         /* Prevent new hci_chans from being created for this hci_conn */
1546         set_bit(HCI_CONN_DROP, &conn->flags);
1547
1548         hci_conn_put(conn);
1549
1550         skb_queue_purge(&chan->data_q);
1551         kfree(chan);
1552 }
1553
1554 void hci_chan_list_flush(struct hci_conn *conn)
1555 {
1556         struct hci_chan *chan, *n;
1557
1558         BT_DBG("hcon %p", conn);
1559
1560         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1561                 hci_chan_del(chan);
1562 }
1563
1564 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1565                                                  __u16 handle)
1566 {
1567         struct hci_chan *hchan;
1568
1569         list_for_each_entry(hchan, &hcon->chan_list, list) {
1570                 if (hchan->handle == handle)
1571                         return hchan;
1572         }
1573
1574         return NULL;
1575 }
1576
1577 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1578 {
1579         struct hci_conn_hash *h = &hdev->conn_hash;
1580         struct hci_conn *hcon;
1581         struct hci_chan *hchan = NULL;
1582
1583         rcu_read_lock();
1584
1585         list_for_each_entry_rcu(hcon, &h->list, list) {
1586                 hchan = __hci_chan_lookup_handle(hcon, handle);
1587                 if (hchan)
1588                         break;
1589         }
1590
1591         rcu_read_unlock();
1592
1593         return hchan;
1594 }