/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
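
/* A usage note (illustrative, not part of this file): with debugfs mounted
 * at /sys/kernel/debug, DUT mode for the first controller is toggled by
 * writing to /sys/kernel/debug/bluetooth/hci0/dut_mode, e.g.
 * "echo Y > .../hci0/dut_mode". Note that disabling sends a plain
 * HCI Reset to take the controller out of test mode.
 */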

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
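
/* A minimal caller sketch (hypothetical, for illustration only): send a
 * command with no parameters and check the status byte of the returned
 * Command Complete event. Callers in this file serialize such calls with
 * hci_req_lock()/hci_req_unlock(), as dut_mode_write() above does.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */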

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
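
/* A hedged example of how the request builders below plug into this
 * machinery: build a one-command request and wait for its completion,
 * e.g. enabling page and inquiry scan:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * SCAN_PAGE/SCAN_INQUIRY and HCI_INIT_TIMEOUT come from hci.h; whether a
 * given caller uses exactly this combination is an assumption.
 */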

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
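
/* The return values map to the Write Inquiry Mode command parameter:
 * 0x00 standard inquiry results, 0x01 results with RSSI, 0x02 results
 * with RSSI or extended format. The manufacturer/revision checks above
 * appear to whitelist specific controllers that handle the RSSI variant
 * without advertising the corresponding LMP feature.
 */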

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
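
/* The event mask is a little-endian 64-bit bitfield: mask bit n lives in
 * events[n / 8] at bit position (n % 8). For example, events[4] |= 0x04
 * sets bit 34, which the Core Specification assigns to Read Remote
 * Extended Features Complete, matching the inline annotations above.
 */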
613
Johan Hedberg42c6b122013-03-05 20:37:49 +0200614static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200615{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200616 struct hci_dev *hdev = req->hdev;
617
Johan Hedberg2177bab2013-03-05 20:37:43 +0200618 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200619 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300620 else
621 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200622
623 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200624 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200625
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100626 /* All Bluetooth 1.2 and later controllers should support the
627 * HCI command for reading the local supported commands.
628 *
629 * Unfortunately some controllers indicate Bluetooth 1.2 support,
630 * but do not have support for this command. If that is the case,
631 * the driver can quirk the behavior and skip reading the local
632 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300633 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100634 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
635 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200636 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200637
638 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700639 /* When SSP is available, then the host features page
640 * should also be available as well. However some
641 * controllers list the max_page as 0 as long as SSP
642 * has not been enabled. To achieve proper debugging
643 * output, force the minimum max_page to 1 at least.
644 */
645 hdev->max_page = 0x01;
646
Johan Hedberg2177bab2013-03-05 20:37:43 +0200647 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
648 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200649 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
650 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200651 } else {
652 struct hci_cp_write_eir cp;
653
654 memset(hdev->eir, 0, sizeof(hdev->eir));
655 memset(&cp, 0, sizeof(cp));
656
Johan Hedberg42c6b122013-03-05 20:37:49 +0200657 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200658 }
659 }
660
661 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200662 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200663
664 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200665 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200666
667 if (lmp_ext_feat_capable(hdev)) {
668 struct hci_cp_read_local_ext_features cp;
669
670 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200671 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
672 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200673 }
674
675 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
676 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200677 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
678 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200679 }
680}
681
Johan Hedberg42c6b122013-03-05 20:37:49 +0200682static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200683{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200684 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200685 struct hci_cp_write_def_link_policy cp;
686 u16 link_policy = 0;
687
688 if (lmp_rswitch_capable(hdev))
689 link_policy |= HCI_LP_RSWITCH;
690 if (lmp_hold_capable(hdev))
691 link_policy |= HCI_LP_HOLD;
692 if (lmp_sniff_capable(hdev))
693 link_policy |= HCI_LP_SNIFF;
694 if (lmp_park_capable(hdev))
695 link_policy |= HCI_LP_PARK;
696
697 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200698 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200699}
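
/* HCI_LP_* are single-bit flags (role switch, hold, sniff, park), so
 * cp.policy ends up as the OR of whatever link-policy features the
 * controller's LMP feature mask advertises; Write Default Link Policy
 * Settings then applies this policy to newly established connections.
 */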

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
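
/* hdev->commands[] mirrors the Read Local Supported Commands bitfield:
 * octet n, bit m marks support for the command the Core Specification
 * assigns to bit n * 8 + m of that table. So commands[6] & 0x80 above
 * checks octet 6 bit 7 (Delete Stored Link Key) before issuing that
 * command.
 */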

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		hci_debugfs_create_le(hdev);
		smp_register(hdev);
	}

	return 0;
}
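
/* To summarize the staged bring-up above: init1 resets the controller and
 * reads basic identity (features, version, address), init2 performs the
 * BR/EDR and LE base setup, init3 configures event masks and link policy
 * plus optional LE extras, and init4 covers page-2 event masks, codecs,
 * MWS transport, synchronization trains and Secure Connections.
 */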

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
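
/* Callers own a reference on success and must drop it when done, e.g.
 * (illustrative):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */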

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
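
/* The returned flags feed the mgmt Device Found event:
 * MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm or resolve the
 * remote name, and MGMT_DEV_FOUND_LEGACY_PAIRING marks devices without
 * Simple Pairing support.
 */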
1248
1249static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1250{
Johan Hedberg30883512012-01-04 14:16:21 +02001251 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 struct inquiry_info *info = (struct inquiry_info *) buf;
1253 struct inquiry_entry *e;
1254 int copied = 0;
1255
Johan Hedberg561aafb2012-01-04 13:31:59 +02001256 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001258
1259 if (copied >= num)
1260 break;
1261
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 bacpy(&info->bdaddr, &data->bdaddr);
1263 info->pscan_rep_mode = data->pscan_rep_mode;
1264 info->pscan_period_mode = data->pscan_period_mode;
1265 info->pscan_mode = data->pscan_mode;
1266 memcpy(info->dev_class, data->dev_class, 3);
1267 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001268
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001270 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 }
1272
1273 BT_DBG("cache %p, copied %d", cache, copied);
1274 return copied;
1275}
1276
Johan Hedberg42c6b122013-03-05 20:37:49 +02001277static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278{
1279 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001280 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 struct hci_cp_inquiry cp;
1282
1283 BT_DBG("%s", hdev->name);
1284
1285 if (test_bit(HCI_INQUIRY, &hdev->flags))
1286 return;
1287
1288 /* Start Inquiry */
1289 memcpy(&cp.lap, &ir->lap, 3);
1290 cp.length = ir->length;
1291 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001292 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293}
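/* Note that hci_inq_req() only queues the HCI_OP_INQUIRY command on
 * the request; it does not wait for the procedure to finish. Callers
 * run it through hci_req_sync() and then block until the HCI_INQUIRY
 * flag is cleared by the Inquiry Complete event (see the wait_on_bit()
 * call in hci_inquiry() below).
 */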
1294
1295int hci_inquiry(void __user *arg)
1296{
1297 __u8 __user *ptr = arg;
1298 struct hci_inquiry_req ir;
1299 struct hci_dev *hdev;
1300 int err = 0, do_inquiry = 0, max_rsp;
1301 long timeo;
1302 __u8 *buf;
1303
1304 if (copy_from_user(&ir, ptr, sizeof(ir)))
1305 return -EFAULT;
1306
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001307 hdev = hci_dev_get(ir.dev_id);
1308 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 return -ENODEV;
1310
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001311 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1312 err = -EBUSY;
1313 goto done;
1314 }
1315
Marcel Holtmann4a964402014-07-02 19:10:33 +02001316 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001317 err = -EOPNOTSUPP;
1318 goto done;
1319 }
1320
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001321 if (hdev->dev_type != HCI_BREDR) {
1322 err = -EOPNOTSUPP;
1323 goto done;
1324 }
1325
Johan Hedberg56f87902013-10-02 13:43:13 +03001326 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1327 err = -EOPNOTSUPP;
1328 goto done;
1329 }
1330
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001331 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001332 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001333 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001334 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 do_inquiry = 1;
1336 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001337 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338
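	/* ir.length is expressed in inquiry-length units of 1.28 s each
	 * (per the Bluetooth Core specification); waiting 2 s per unit
	 * gives the controller comfortable headroom to finish.
	 */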
Marcel Holtmann04837f62006-07-03 10:02:33 +02001339 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001340
1341 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001342 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1343 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001344 if (err < 0)
1345 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001346
1347 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1348 * cleared). If it is interrupted by a signal, return -EINTR.
1349 */
NeilBrown74316202014-07-07 15:16:04 +10001350 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001351 TASK_INTERRUPTIBLE))
1352 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001353 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001355	/* For an unlimited number of responses, use a buffer with
1356	 * 255 entries.
1357	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1359
1360	/* inquiry_cache_dump() can't sleep, so allocate a temporary
1361	 * buffer first and copy it to user space afterwards.
1362	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001363 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001364 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 err = -ENOMEM;
1366 goto done;
1367 }
1368
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001369 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001371 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
1373 BT_DBG("num_rsp %d", ir.num_rsp);
1374
1375 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1376 ptr += sizeof(ir);
1377 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001378 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001380 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 err = -EFAULT;
1382
1383 kfree(buf);
1384
1385done:
1386 hci_dev_put(hdev);
1387 return err;
1388}
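/* A minimal userspace sketch of driving this ioctl, illustrative only
 * and assuming BlueZ's <bluetooth/bluetooth.h> and <bluetooth/hci.h>;
 * the buffer layout mirrors the copy_to_user() calls above:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id  = 0,
 *			  .flags   = IREQ_CACHE_FLUSH,
 *			  .lap     = { 0x33, 0x8b, 0x9e },
 *			  .length  = 8,
 *			  .num_rsp = 255 } };
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (dd >= 0 && ioctl(dd, HCIINQUIRY, &buf) >= 0)
 *		printf("%u responses\n", buf.ir.num_rsp);
 *
 * The LAP { 0x33, 0x8b, 0x9e } is the General Inquiry Access Code in
 * little-endian order.
 */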
1389
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001390static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 int ret = 0;
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 BT_DBG("%s %p", hdev->name, hdev);
1395
1396 hci_req_lock(hdev);
1397
Johan Hovold94324962012-03-15 14:48:41 +01001398 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1399 ret = -ENODEV;
1400 goto done;
1401 }
1402
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02001403 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1404 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001405 /* Check for rfkill but allow the HCI setup stage to
1406 * proceed (which in itself doesn't cause any RF activity).
1407 */
1408 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1409 ret = -ERFKILL;
1410 goto done;
1411 }
1412
1413 /* Check for valid public address or a configured static
1414	 * random address, but let the HCI setup proceed to
1415 * be able to determine if there is a public address
1416 * or not.
1417 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001418 * In case of user channel usage, it is not important
1419 * if a public address or static random address is
1420 * available.
1421 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001422 * This check is only valid for BR/EDR controllers
1423 * since AMP controllers do not have an address.
1424 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001425 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1426 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001427 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1428 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1429 ret = -EADDRNOTAVAIL;
1430 goto done;
1431 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001432 }
1433
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 if (test_bit(HCI_UP, &hdev->flags)) {
1435 ret = -EALREADY;
1436 goto done;
1437 }
1438
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 if (hdev->open(hdev)) {
1440 ret = -EIO;
1441 goto done;
1442 }
1443
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001444 atomic_set(&hdev->cmd_cnt, 1);
1445 set_bit(HCI_INIT, &hdev->flags);
1446
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001447 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1448 if (hdev->setup)
1449 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001450
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001451 /* The transport driver can set these quirks before
1452 * creating the HCI device or in its setup callback.
1453 *
1454 * In case any of them is set, the controller has to
1455 * start up as unconfigured.
1456 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001457 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1458 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001459 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001460
1461 /* For an unconfigured controller it is required to
1462 * read at least the version information provided by
1463 * the Read Local Version Information command.
1464 *
1465 * If the set_bdaddr driver callback is provided, then
1466 * also the original Bluetooth public device address
1467 * will be read using the Read BD Address command.
1468 */
1469 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1470 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001471 }
1472
Marcel Holtmann9713c172014-07-06 12:11:15 +02001473 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1474 /* If public address change is configured, ensure that
1475 * the address gets programmed. If the driver does not
1476 * support changing the public address, fail the power
1477 * on procedure.
1478 */
1479 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1480 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001481 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1482 else
1483 ret = -EADDRNOTAVAIL;
1484 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001485
1486 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02001487 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001488 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001489 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 }
1491
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001492 clear_bit(HCI_INIT, &hdev->flags);
1493
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 if (!ret) {
1495 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02001496 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 set_bit(HCI_UP, &hdev->flags);
1498 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001499 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02001500 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02001501 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001502 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001503 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001504 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001505 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001506 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001507 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001508 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001510 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001511 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001512 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513
1514 skb_queue_purge(&hdev->cmd_q);
1515 skb_queue_purge(&hdev->rx_q);
1516
1517 if (hdev->flush)
1518 hdev->flush(hdev);
1519
1520 if (hdev->sent_cmd) {
1521 kfree_skb(hdev->sent_cmd);
1522 hdev->sent_cmd = NULL;
1523 }
1524
1525 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001526 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 }
1528
1529done:
1530 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 return ret;
1532}
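/* Rough power-on order implemented above: transport open -> driver
 * setup() while HCI_SETUP is set (or address programming while
 * HCI_CONFIG is set) -> __hci_unconf_init()/__hci_init() -> HCI_UP
 * plus mgmt_powered(), with a full teardown path if any stage fails.
 */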
1533
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001534/* ---- HCI ioctl helpers ---- */
1535
1536int hci_dev_open(__u16 dev)
1537{
1538 struct hci_dev *hdev;
1539 int err;
1540
1541 hdev = hci_dev_get(dev);
1542 if (!hdev)
1543 return -ENODEV;
1544
Marcel Holtmann4a964402014-07-02 19:10:33 +02001545 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001546 * up as user channel. Trying to bring them up as normal devices
1547 * will result into a failure. Only user channel operation is
1548 * possible.
1549 *
1550 * When this function is called for a user channel, the flag
1551 * HCI_USER_CHANNEL will be set first before attempting to
1552 * open the device.
1553 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02001554 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001555 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1556 err = -EOPNOTSUPP;
1557 goto done;
1558 }
1559
Johan Hedberge1d08f42013-10-01 22:44:50 +03001560 /* We need to ensure that no other power on/off work is pending
1561 * before proceeding to call hci_dev_do_open. This is
1562 * particularly important if the setup procedure has not yet
1563 * completed.
1564 */
1565 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1566 cancel_delayed_work(&hdev->power_off);
1567
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001568 /* After this call it is guaranteed that the setup procedure
1569 * has finished. This means that error conditions like RFKILL
1570 * or no valid public or static random address apply.
1571 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001572 flush_workqueue(hdev->req_workqueue);
1573
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001574	/* For controllers that do not use the management interface
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001575	 * and are brought up via the legacy ioctl, set the HCI_BONDABLE
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001576	 * bit so that pairing works for them. Once the management interface
1577 * is in use this bit will be cleared again and userspace has
1578 * to explicitly enable it.
1579 */
1580 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1581 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001582 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001583
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001584 err = hci_dev_do_open(hdev);
1585
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001586done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001587 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001588 return err;
1589}
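/* Illustrative only: the legacy way to reach this path from userspace
 * is the HCIDEVUP ioctl on a raw HCI control socket, assuming BlueZ
 * headers, e.g.:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0)
 *		perror("HCIDEVUP hci0");
 */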
1590
Johan Hedbergd7347f32014-07-04 12:37:23 +03001591/* This function requires the caller holds hdev->lock */
1592static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1593{
1594 struct hci_conn_params *p;
1595
Johan Hedbergf161dd42014-08-15 21:06:54 +03001596 list_for_each_entry(p, &hdev->le_conn_params, list) {
1597 if (p->conn) {
1598 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001599 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001600 p->conn = NULL;
1601 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001602 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001603 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001604
1605 BT_DBG("All LE pending actions cleared");
1606}
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608static int hci_dev_do_close(struct hci_dev *hdev)
1609{
1610 BT_DBG("%s %p", hdev->name, hdev);
1611
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001612 cancel_delayed_work(&hdev->power_off);
1613
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 hci_req_cancel(hdev, ENODEV);
1615 hci_req_lock(hdev);
1616
1617 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001618 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 hci_req_unlock(hdev);
1620 return 0;
1621 }
1622
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001623 /* Flush RX and TX works */
1624 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001625 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001627 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001628 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001629 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001630 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001631 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001632 }
1633
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001634 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001635 cancel_delayed_work(&hdev->service_cache);
1636
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001637 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001638
1639 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1640 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001641
Johan Hedberg76727c02014-11-18 09:00:14 +02001642 /* Avoid potential lockdep warnings from the *_flush() calls by
1643 * ensuring the workqueue is empty up front.
1644 */
1645 drain_workqueue(hdev->workqueue);
1646
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001647 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001648
1649 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1650 if (hdev->dev_type == HCI_BREDR)
1651 mgmt_powered(hdev, 0);
1652 }
1653
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001654 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001655 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001656 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001657 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
1659 hci_notify(hdev, HCI_DEV_DOWN);
1660
1661 if (hdev->flush)
1662 hdev->flush(hdev);
1663
1664 /* Reset device */
1665 skb_queue_purge(&hdev->cmd_q);
1666 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02001667 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1668 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001669 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001671 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 clear_bit(HCI_INIT, &hdev->flags);
1673 }
1674
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001675 /* flush cmd work */
1676 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
1678 /* Drop queues */
1679 skb_queue_purge(&hdev->rx_q);
1680 skb_queue_purge(&hdev->cmd_q);
1681 skb_queue_purge(&hdev->raw_q);
1682
1683 /* Drop last sent command */
1684 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001685 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 kfree_skb(hdev->sent_cmd);
1687 hdev->sent_cmd = NULL;
1688 }
1689
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001690 kfree_skb(hdev->recv_evt);
1691 hdev->recv_evt = NULL;
1692
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 /* After this point our queues are empty
1694 * and no tasks are scheduled. */
1695 hdev->close(hdev);
1696
Johan Hedberg35b973c2013-03-15 17:06:59 -05001697 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001698 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001699 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1700
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001701 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001702 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001703
Johan Hedberge59fda82012-02-22 18:11:53 +02001704 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001705 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001706 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001707
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 hci_req_unlock(hdev);
1709
1710 hci_dev_put(hdev);
1711 return 0;
1712}
1713
1714int hci_dev_close(__u16 dev)
1715{
1716 struct hci_dev *hdev;
1717 int err;
1718
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001719 hdev = hci_dev_get(dev);
1720 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1724 err = -EBUSY;
1725 goto done;
1726 }
1727
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001728 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1729 cancel_delayed_work(&hdev->power_off);
1730
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001732
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001733done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 hci_dev_put(hdev);
1735 return err;
1736}
1737
1738int hci_dev_reset(__u16 dev)
1739{
1740 struct hci_dev *hdev;
1741 int ret = 0;
1742
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001743 hdev = hci_dev_get(dev);
1744 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 return -ENODEV;
1746
1747 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Marcel Holtmann808a0492013-08-26 20:57:58 -07001749 if (!test_bit(HCI_UP, &hdev->flags)) {
1750 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001754 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1755 ret = -EBUSY;
1756 goto done;
1757 }
1758
Marcel Holtmann4a964402014-07-02 19:10:33 +02001759 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001760 ret = -EOPNOTSUPP;
1761 goto done;
1762 }
1763
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 /* Drop queues */
1765 skb_queue_purge(&hdev->rx_q);
1766 skb_queue_purge(&hdev->cmd_q);
1767
Johan Hedberg76727c02014-11-18 09:00:14 +02001768 /* Avoid potential lockdep warnings from the *_flush() calls by
1769 * ensuring the workqueue is empty up front.
1770 */
1771 drain_workqueue(hdev->workqueue);
1772
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001773 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001774 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001776 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777
1778 if (hdev->flush)
1779 hdev->flush(hdev);
1780
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001781 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001782 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001784 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785
1786done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 hci_req_unlock(hdev);
1788 hci_dev_put(hdev);
1789 return ret;
1790}
1791
1792int hci_dev_reset_stat(__u16 dev)
1793{
1794 struct hci_dev *hdev;
1795 int ret = 0;
1796
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001797 hdev = hci_dev_get(dev);
1798 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return -ENODEV;
1800
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001801 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1802 ret = -EBUSY;
1803 goto done;
1804 }
1805
Marcel Holtmann4a964402014-07-02 19:10:33 +02001806 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001807 ret = -EOPNOTSUPP;
1808 goto done;
1809 }
1810
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1812
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001813done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 return ret;
1816}
1817
Johan Hedberg123abc02014-07-10 12:09:07 +03001818static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1819{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001820 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001821
1822 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1823
1824 if ((scan & SCAN_PAGE))
1825 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1826 &hdev->dev_flags);
1827 else
1828 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1829 &hdev->dev_flags);
1830
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001831 if ((scan & SCAN_INQUIRY)) {
1832 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1833 &hdev->dev_flags);
1834 } else {
1835 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1836 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1837 &hdev->dev_flags);
1838 }
1839
Johan Hedberg123abc02014-07-10 12:09:07 +03001840 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1841 return;
1842
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001843 if (conn_changed || discov_changed) {
1844 /* In case this was disabled through mgmt */
1845 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1846
1847 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1848 mgmt_update_adv_data(hdev);
1849
Johan Hedberg123abc02014-07-10 12:09:07 +03001850 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001851 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001852}
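/* Mapping used above: SCAN_PAGE tracks HCI_CONNECTABLE and
 * SCAN_INQUIRY tracks HCI_DISCOVERABLE, so a scan-enable value of
 * (SCAN_PAGE | SCAN_INQUIRY) written through HCISETSCAN marks the
 * device both connectable and discoverable, and mgmt is only
 * notified when one of the states actually changed.
 */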
1853
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854int hci_dev_cmd(unsigned int cmd, void __user *arg)
1855{
1856 struct hci_dev *hdev;
1857 struct hci_dev_req dr;
1858 int err = 0;
1859
1860 if (copy_from_user(&dr, arg, sizeof(dr)))
1861 return -EFAULT;
1862
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001863 hdev = hci_dev_get(dr.dev_id);
1864 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 return -ENODEV;
1866
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001867 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1868 err = -EBUSY;
1869 goto done;
1870 }
1871
Marcel Holtmann4a964402014-07-02 19:10:33 +02001872 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001873 err = -EOPNOTSUPP;
1874 goto done;
1875 }
1876
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001877 if (hdev->dev_type != HCI_BREDR) {
1878 err = -EOPNOTSUPP;
1879 goto done;
1880 }
1881
Johan Hedberg56f87902013-10-02 13:43:13 +03001882 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1883 err = -EOPNOTSUPP;
1884 goto done;
1885 }
1886
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 switch (cmd) {
1888 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001889 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1890 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 break;
1892
1893 case HCISETENCRYPT:
1894 if (!lmp_encrypt_capable(hdev)) {
1895 err = -EOPNOTSUPP;
1896 break;
1897 }
1898
1899 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1900 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001901 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1902 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 if (err)
1904 break;
1905 }
1906
Johan Hedberg01178cd2013-03-05 20:37:41 +02001907 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1908 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 break;
1910
1911 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001912 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1913 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001914
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001915 /* Ensure that the connectable and discoverable states
1916 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001917 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001918 if (!err)
1919 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 break;
1921
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001922 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001923 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1924 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001925 break;
1926
1927 case HCISETLINKMODE:
1928 hdev->link_mode = ((__u16) dr.dev_opt) &
1929 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1930 break;
1931
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 case HCISETPTYPE:
1933 hdev->pkt_type = (__u16) dr.dev_opt;
1934 break;
1935
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001937 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1938 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 break;
1940
1941 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001942 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1943 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 break;
1945
1946 default:
1947 err = -EINVAL;
1948 break;
1949 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001950
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001951done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 hci_dev_put(hdev);
1953 return err;
1954}
1955
1956int hci_get_dev_list(void __user *arg)
1957{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001958 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 struct hci_dev_list_req *dl;
1960 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 int n = 0, size, err;
1962 __u16 dev_num;
1963
1964 if (get_user(dev_num, (__u16 __user *) arg))
1965 return -EFAULT;
1966
1967 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1968 return -EINVAL;
1969
1970 size = sizeof(*dl) + dev_num * sizeof(*dr);
1971
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001972 dl = kzalloc(size, GFP_KERNEL);
1973 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 return -ENOMEM;
1975
1976 dr = dl->dev_req;
1977
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001978 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001979 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001980 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001981
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001982		/* When auto-off is configured, the transport is actually
1983		 * running, but still report the device as down in that
1984		 * case.
1985		 */
1986 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1987 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001990 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 if (++n >= dev_num)
1993 break;
1994 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001995 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
1997 dl->dev_num = n;
1998 size = sizeof(*dl) + n * sizeof(*dr);
1999
2000 err = copy_to_user(arg, dl, size);
2001 kfree(dl);
2002
2003 return err ? -EFAULT : 0;
2004}
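/* Illustrative userspace counterpart, assuming BlueZ headers and a
 * fixed maximum of 16 devices:
 *
 *	struct hci_dev_list_req *dl;
 *	int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	if (ctl >= 0 && dl) {
 *		dl->dev_num = 16;
 *		if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
 *			for (i = 0; i < dl->dev_num; i++)
 *				printf("hci%u flags 0x%x\n",
 *				       dl->dev_req[i].dev_id,
 *				       dl->dev_req[i].dev_opt);
 *	}
 */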
2005
2006int hci_get_dev_info(void __user *arg)
2007{
2008 struct hci_dev *hdev;
2009 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002010 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 int err = 0;
2012
2013 if (copy_from_user(&di, arg, sizeof(di)))
2014 return -EFAULT;
2015
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002016 hdev = hci_dev_get(di.dev_id);
2017 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 return -ENODEV;
2019
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002020	/* When auto-off is configured, the transport is actually
2021	 * running, but still report the device as down in that
2022	 * case.
2023	 */
2024 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2025 flags = hdev->flags & ~BIT(HCI_UP);
2026 else
2027 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002028
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 strcpy(di.name, hdev->name);
2030 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002031 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002032 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002034 if (lmp_bredr_capable(hdev)) {
2035 di.acl_mtu = hdev->acl_mtu;
2036 di.acl_pkts = hdev->acl_pkts;
2037 di.sco_mtu = hdev->sco_mtu;
2038 di.sco_pkts = hdev->sco_pkts;
2039 } else {
2040 di.acl_mtu = hdev->le_mtu;
2041 di.acl_pkts = hdev->le_pkts;
2042 di.sco_mtu = 0;
2043 di.sco_pkts = 0;
2044 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 di.link_policy = hdev->link_policy;
2046 di.link_mode = hdev->link_mode;
2047
2048 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2049 memcpy(&di.features, &hdev->features, sizeof(di.features));
2050
2051 if (copy_to_user(arg, &di, sizeof(di)))
2052 err = -EFAULT;
2053
2054 hci_dev_put(hdev);
2055
2056 return err;
2057}
2058
2059/* ---- Interface to HCI drivers ---- */
2060
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002061static int hci_rfkill_set_block(void *data, bool blocked)
2062{
2063 struct hci_dev *hdev = data;
2064
2065 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2066
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002067 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2068 return -EBUSY;
2069
Johan Hedberg5e130362013-09-13 08:58:17 +03002070 if (blocked) {
2071 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002072 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2073 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002074 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002075 } else {
2076 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002077 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002078
2079 return 0;
2080}
2081
2082static const struct rfkill_ops hci_rfkill_ops = {
2083 .set_block = hci_rfkill_set_block,
2084};
2085
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002086static void hci_power_on(struct work_struct *work)
2087{
2088 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002089 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002090
2091 BT_DBG("%s", hdev->name);
2092
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002093 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002094 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302095 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002096 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302097 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002098 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002099 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002100
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002101 /* During the HCI setup phase, a few error conditions are
2102 * ignored and they need to be checked now. If they are still
2103 * valid, it is important to turn the device back off.
2104 */
2105 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002106 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002107 (hdev->dev_type == HCI_BREDR &&
2108 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2109 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002110 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2111 hci_dev_do_close(hdev);
2112 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002113 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2114 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002115 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002116
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002117 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002118 /* For unconfigured devices, set the HCI_RAW flag
2119 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002120 */
2121 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2122 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002123
2124 /* For fully configured devices, this will send
2125 * the Index Added event. For unconfigured devices,
2126		 * it will send the Unconfigured Index Added event.
2127 *
2128 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2129		 * and no event will be sent.
2130 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002131 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002132 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002133		/* Once the controller is configured, it is
2134		 * important to clear the HCI_RAW flag.
2135		 */
2136 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2137 clear_bit(HCI_RAW, &hdev->flags);
2138
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002139 /* Powering on the controller with HCI_CONFIG set only
2140 * happens with the transition from unconfigured to
2141 * configured. This will send the Index Added event.
2142 */
2143 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002144 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002145}
2146
2147static void hci_power_off(struct work_struct *work)
2148{
Johan Hedberg32435532011-11-07 22:16:04 +02002149 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002150 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002151
2152 BT_DBG("%s", hdev->name);
2153
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002154 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002155}
2156
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002157static void hci_discov_off(struct work_struct *work)
2158{
2159 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002160
2161 hdev = container_of(work, struct hci_dev, discov_off.work);
2162
2163 BT_DBG("%s", hdev->name);
2164
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002165 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002166}
2167
Johan Hedberg35f74982014-02-18 17:14:32 +02002168void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002169{
Johan Hedberg48210022013-01-27 00:31:28 +02002170 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002171
Johan Hedberg48210022013-01-27 00:31:28 +02002172 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2173 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002174 kfree(uuid);
2175 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002176}
2177
Johan Hedberg35f74982014-02-18 17:14:32 +02002178void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002179{
Johan Hedberg0378b592014-11-19 15:22:22 +02002180 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002181
Johan Hedberg0378b592014-11-19 15:22:22 +02002182 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2183 list_del_rcu(&key->list);
2184 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002185 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002186}
2187
Johan Hedberg35f74982014-02-18 17:14:32 +02002188void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002189{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002190 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002191
Johan Hedberg970d0f12014-11-13 14:37:47 +02002192 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2193 list_del_rcu(&k->list);
2194 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002195 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002196}
2197
Johan Hedberg970c4e42014-02-18 10:19:33 +02002198void hci_smp_irks_clear(struct hci_dev *hdev)
2199{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002200 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002201
Johan Hedbergadae20c2014-11-13 14:37:48 +02002202 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2203 list_del_rcu(&k->list);
2204 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002205 }
2206}
2207
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002208struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2209{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002210 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002211
Johan Hedberg0378b592014-11-19 15:22:22 +02002212 rcu_read_lock();
2213 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2214 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2215 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002216 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002217 }
2218 }
2219 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002220
2221 return NULL;
2222}
2223
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302224static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002225 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002226{
2227 /* Legacy key */
2228 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302229 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002230
2231 /* Debug keys are insecure so don't store them persistently */
2232 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302233 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002234
2235 /* Changed combination key and there's no previous one */
2236 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302237 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002238
2239 /* Security mode 3 case */
2240 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302241 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002242
Johan Hedberge3befab2014-06-01 16:33:39 +03002243 /* BR/EDR key derived using SC from an LE link */
2244 if (conn->type == LE_LINK)
2245 return true;
2246
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002247 /* Neither local nor remote side had no-bonding as requirement */
2248 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302249 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002250
2251 /* Local side had dedicated bonding as requirement */
2252 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302253 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002254
2255 /* Remote side had dedicated bonding as requirement */
2256 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302257 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002258
2259 /* If none of the above criteria match, then don't store the key
2260 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302261 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002262}
2263
Johan Hedberge804d252014-07-16 11:42:28 +03002264static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002265{
Johan Hedberge804d252014-07-16 11:42:28 +03002266 if (type == SMP_LTK)
2267 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002268
Johan Hedberge804d252014-07-16 11:42:28 +03002269 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002270}
2271
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002272struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002274{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002275 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002276
Johan Hedberg970d0f12014-11-13 14:37:47 +02002277 rcu_read_lock();
2278 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002279 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2280 continue;
2281
Johan Hedberg923e2412014-12-03 12:43:39 +02002282 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002283 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002284 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002285 }
2286 }
2287 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002288
2289 return NULL;
2290}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002291
Johan Hedberg970c4e42014-02-18 10:19:33 +02002292struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2293{
2294 struct smp_irk *irk;
2295
Johan Hedbergadae20c2014-11-13 14:37:48 +02002296 rcu_read_lock();
2297 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2298 if (!bacmp(&irk->rpa, rpa)) {
2299 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002300 return irk;
2301 }
2302 }
2303
Johan Hedbergadae20c2014-11-13 14:37:48 +02002304 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2305 if (smp_irk_matches(hdev, irk->val, rpa)) {
2306 bacpy(&irk->rpa, rpa);
2307 rcu_read_unlock();
2308 return irk;
2309 }
2310 }
2311 rcu_read_unlock();
2312
Johan Hedberg970c4e42014-02-18 10:19:33 +02002313 return NULL;
2314}
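/* The RPA lookup above is two-pass on purpose: the first loop is a
 * cheap comparison against the last RPA each IRK was seen to resolve,
 * and only on a miss does the second loop run the cryptographic
 * smp_irk_matches() check, caching a hit back into irk->rpa.
 */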
2315
2316struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2317 u8 addr_type)
2318{
2319 struct smp_irk *irk;
2320
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002321 /* Identity Address must be public or static random */
2322 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2323 return NULL;
2324
Johan Hedbergadae20c2014-11-13 14:37:48 +02002325 rcu_read_lock();
2326 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002327 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002328 bacmp(bdaddr, &irk->bdaddr) == 0) {
2329 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002330 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002331 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002332 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002333 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002334
2335 return NULL;
2336}
2337
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002338struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002339 bdaddr_t *bdaddr, u8 *val, u8 type,
2340 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002341{
2342 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302343 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002344
2345 old_key = hci_find_link_key(hdev, bdaddr);
2346 if (old_key) {
2347 old_key_type = old_key->type;
2348 key = old_key;
2349 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002350 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002351 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002352 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002353 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002354 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002355 }
2356
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002357 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002358
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002359 /* Some buggy controller combinations generate a changed
2360 * combination key for legacy pairing even when there's no
2361 * previous key */
2362 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002363 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002364 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002365 if (conn)
2366 conn->key_type = type;
2367 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002368
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002369 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002370 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002371 key->pin_len = pin_len;
2372
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002373 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002374 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002375 else
2376 key->type = type;
2377
Johan Hedberg7652ff62014-06-24 13:15:49 +03002378 if (persistent)
2379 *persistent = hci_persistent_key(hdev, conn, type,
2380 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002381
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002382 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002383}
2384
Johan Hedbergca9142b2014-02-19 14:57:44 +02002385struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002386 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002387 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002388{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002389 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002390 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002391
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002392 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002393 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002394 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002395 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002396 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002397 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002398 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002399 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002400 }
2401
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002402 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002403 key->bdaddr_type = addr_type;
2404 memcpy(key->val, tk, sizeof(key->val));
2405 key->authenticated = authenticated;
2406 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002407 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002408 key->enc_size = enc_size;
2409 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002410
Johan Hedbergca9142b2014-02-19 14:57:44 +02002411 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002412}
2413
Johan Hedbergca9142b2014-02-19 14:57:44 +02002414struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2415 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002416{
2417 struct smp_irk *irk;
2418
2419 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2420 if (!irk) {
2421 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2422 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002423 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002424
2425 bacpy(&irk->bdaddr, bdaddr);
2426 irk->addr_type = addr_type;
2427
Johan Hedbergadae20c2014-11-13 14:37:48 +02002428 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002429 }
2430
2431 memcpy(irk->val, val, 16);
2432 bacpy(&irk->rpa, rpa);
2433
Johan Hedbergca9142b2014-02-19 14:57:44 +02002434 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002435}
2436
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002437int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2438{
2439 struct link_key *key;
2440
2441 key = hci_find_link_key(hdev, bdaddr);
2442 if (!key)
2443 return -ENOENT;
2444
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002445 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002446
Johan Hedberg0378b592014-11-19 15:22:22 +02002447 list_del_rcu(&key->list);
2448 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002449
2450 return 0;
2451}
2452
Johan Hedberge0b2b272014-02-18 17:14:31 +02002453int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002454{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002455 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002456 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002457
Johan Hedberg970d0f12014-11-13 14:37:47 +02002458 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002459 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002460 continue;
2461
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002462 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002463
Johan Hedberg970d0f12014-11-13 14:37:47 +02002464 list_del_rcu(&k->list);
2465 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002466 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002467 }
2468
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002469 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002470}
2471
Johan Hedberga7ec7332014-02-18 17:14:35 +02002472void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2473{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002474 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002475
Johan Hedbergadae20c2014-11-13 14:37:48 +02002476 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002477 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2478 continue;
2479
2480 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2481
Johan Hedbergadae20c2014-11-13 14:37:48 +02002482 list_del_rcu(&k->list);
2483 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002484 }
2485}
2486
Ville Tervo6bd32322011-02-16 16:32:41 +02002487/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002488static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002489{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002490 struct hci_dev *hdev = container_of(work, struct hci_dev,
2491 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002492
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002493 if (hdev->sent_cmd) {
2494 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2495 u16 opcode = __le16_to_cpu(sent->opcode);
2496
2497 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2498 } else {
2499 BT_ERR("%s command tx timeout", hdev->name);
2500 }
2501
Ville Tervo6bd32322011-02-16 16:32:41 +02002502 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002503 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002504}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
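/*
 * Illustrative sketch, not upstream code: a caller that only has legacy
 * P-192 OOB material (e.g. from a management interface request) would
 * pass NULL for the P-256 values so they get zeroed out:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *				      hash192, rand192, NULL, NULL);
 *	hci_dev_unlock(hdev);
 */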

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
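/*
 * Illustrative sketch, not upstream code: the same helpers back every
 * per-device address list (blacklist, whitelist, le_white_list).  For
 * example, treating -EEXIST as success when adding to the whitelist:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err < 0 && err != -EEXIST)
 *		return err;
 *
 * Note that BDADDR_ANY is rejected on add (-EBADF) but doubles as a
 * "clear the whole list" wildcard on delete.
 */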

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
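/*
 * Illustrative sketch, not upstream code: a typical caller adds (or
 * re-uses) a params entry and then flips its auto_connect policy, e.g.
 * to get background connection attempts for a bonded device:
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 *
 * (Real callers also have to move the entry onto the right action list;
 * that bookkeeping is omitted here.)
 */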

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
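/*
 * Illustrative sketch, not upstream code: callers that need an "own
 * address" for advertising or connection setup use this helper to pick
 * between the public and static identity, e.g.:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *	BT_DBG("identity %pMR (type %u)", &id_addr, id_addr_type);
 */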

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
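/*
 * Illustrative sketch, not upstream code: the minimal shape of a
 * transport driver registering a controller (names prefixed "my_" are
 * hypothetical; open, close and send are the mandatory callbacks
 * checked above):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_drv_open;
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send_frame;
 *	hci_set_drvdata(hdev, my_drv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */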

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
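/*
 * Illustrative sketch, not upstream code: a driver's RX completion
 * handler just tags the packet type and hands the skb off ("my_drv" is
 * a hypothetical transport):
 *
 *	static void my_drv_rx_complete(struct my_drv *drv,
 *				       struct sk_buff *skb)
 *	{
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *		hci_recv_frame(drv->hdev, skb);
 *	}
 *
 * Note that hci_recv_frame() consumes the skb even on error, so the
 * caller must not touch it afterwards.
 */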

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
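/*
 * Illustrative sketch, not upstream code: a UART-style transport with
 * no packet framing of its own can push raw bytes straight into the
 * stream reassembler from its receive callback ("my_uart" names are
 * hypothetical):
 *
 *	static void my_uart_receive_buf(struct my_uart *u,
 *					const u8 *buf, int count)
 *	{
 *		int err = hci_recv_stream_fragment(u->hdev,
 *						   (void *)buf, count);
 *		if (err < 0)
 *			BT_ERR("%s corrupted stream", u->hdev->name);
 *	}
 *
 * The first byte of each frame is the packet type indicator (H:4 style
 * framing); the reassembler uses it to size the rest of the packet.
 */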

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
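/*
 * Illustrative sketch, not upstream code: an upper protocol hooks into
 * connection-level events by registering a struct hci_cb, typically at
 * module init.  Which callback members get filled in depends on what
 * the protocol cares about and is omitted here:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */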

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

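/*
 * Illustrative sketch, not upstream code: queueing a command with
 * parameters, here a Write Local Name built from a caller-provided
 * buffer (assumed to hold at most HCI_MAX_NAME_LENGTH bytes):
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, name, sizeof(cp.name));
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 *
 * The call only queues the skb; the command work item sends it when the
 * controller's command credit allows, and completion arrives
 * asynchronously as an HCI event.
 */
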
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
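/*
 * Worked example (illustrative): with hdev->acl_cnt == 5 free buffers
 * and num == 2 busy ACL links, the least-used link gets a quote of
 * 5 / 2 = 2 packets per round.  With more links than buffers (e.g.
 * cnt 1, num 3) the integer division yields 0 and the "q ? q : 1"
 * fallback still grants one packet, so no link is ever starved.
 */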

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
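/*
 * Worked example (illustrative): under block-based flow control, a
 * 1021-byte skb with a block_len of 256 occupies
 * DIV_ROUND_UP(1021 - 4, 256) = 4 controller buffer blocks; skb->len
 * still includes the 4-byte ACL header, so it is subtracted before
 * dividing and rounding up.
 */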
3777
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003778static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779{
Marcel Holtmann4a964402014-07-02 19:10:33 +02003780 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781 /* ACL tx timeout must be longer than maximum
3782 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003783 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003784 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003785 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003787}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003789static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003790{
3791 unsigned int cnt = hdev->acl_cnt;
3792 struct hci_chan *chan;
3793 struct sk_buff *skb;
3794 int quote;
3795
3796 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003797
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003798 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003799 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003800 u32 priority = (skb_peek(&chan->data_q))->priority;
3801 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003802 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003803 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003804
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003805 /* Stop if priority has changed */
3806 if (skb->priority < priority)
3807 break;
3808
3809 skb = skb_dequeue(&chan->data_q);
3810
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003811 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003812 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003813
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003814 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 hdev->acl_last_tx = jiffies;
3816
3817 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003818 chan->sent++;
3819 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 }
3821 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003822
3823 if (cnt != hdev->acl_cnt)
3824 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825}
3826
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

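/* Editor's sketch (hedged): under block-based flow control the
 * controller reports buffer space in fixed-size blocks rather than
 * whole packets. Assuming __get_blocks(), defined earlier in this
 * file, rounds the payload up to whole blocks, a 600-byte frame on a
 * controller with block_len = 256 would cost:
 *
 *	blocks = DIV_ROUND_UP(600 - HCI_ACL_HDR_SIZE, 256) = 3
 *
 * and both hdev->block_cnt and the channel quote are debited by 3
 * for that single skb.
 */
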
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

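/* Editor's note (sketch; the rationale is inferred, not stated in
 * the source): SCO and eSCO share hdev->sco_cnt and, unlike ACL and
 * LE, get no tx-timeout policing here, presumably because
 * synchronous links run in fixed slots and a quiet link is not a
 * sign of a stuck controller. The
 *
 *	if (conn->sent == ~0)
 *		conn->sent = 0;
 *
 * check is just an overflow guard that resets the per-connection
 * counter before it wraps.
 */
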
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

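/* Editor's sketch of the watchdog arithmetic above: the LE link
 * supervision timeout can be at most 40.9 seconds, so the tx timeout
 * fires only after a strictly longer window of HZ * 45, i.e. 45
 * seconds of silence with credits exhausted:
 *
 *	time_after(jiffies, hdev->le_last_tx + HZ * 45)
 *
 * Also worth noting: controllers without a dedicated LE buffer pool
 * report le_pkts == 0 and borrow credits from acl_cnt, which is why
 * the function writes the remainder back to whichever counter it
 * drew from.
 */
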
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

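/* Editor's usage sketch (hedged; the producers live elsewhere in the
 * stack): hci_tx_work() is never called directly. Senders queue an
 * skb on a connection or channel queue and kick the work item,
 * roughly:
 *
 *	skb_queue_tail(&chan->data_q, skb);
 *	queue_work(hdev->workqueue, &hdev->tx_work);
 *
 * so all frame transmission for a device is serialized on
 * hdev->workqueue.
 */
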
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

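/* Editor's worked example: the 16-bit field in the ACL header packs
 * a 12-bit connection handle with 4 flag bits (packet boundary and
 * broadcast). With the hci.h macros used above:
 *
 *	raw    = 0x2001			(as read from the wire)
 *	flags  = hci_flags(raw)		-> 0x2   (raw >> 12)
 *	handle = hci_handle(raw)	-> 0x001 (raw & 0x0fff)
 *
 * where a packet-boundary value of 0x2 (ACL_START) marks the first
 * fragment of an L2CAP PDU.
 */
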
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

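/* Editor's sketch: bt_cb(skb)->req.start flags the first command of
 * a request, so the check above asks "is the head of cmd_q the start
 * of a new request?". Example queue state, front to back:
 *
 *	[A start=1] [B start=0] [C start=1]
 *
 * With A sent and B still queued the request is incomplete (head B
 * has start=0); once B has been sent, the head is C with start=1 and
 * the A/B request is considered complete.
 */
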
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

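/* Editor's usage sketch (assumes the request helpers declared in
 * hci_request.h; the callback name is illustrative): a multi-command
 * request whose completion funnels through hci_req_cmd_complete()
 * above is built roughly like this:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * Only the first queued command carries req.start, and
 * my_complete_cb() fires exactly once, with the status of the
 * command that ended the request.
 */
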
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

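/* Editor's sketch of the producer side (hedged; hci_recv_frame() is
 * defined earlier in this file): transport drivers tag the packet
 * type and hand received data to the core, roughly:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 *
 * which queues the skb on hdev->rx_q and schedules rx_work, landing
 * in hci_rx_work() above.
 */
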
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
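
/* Editor's usage sketch (hedged): hci_cmd_work() is fed by
 * hci_send_cmd(), defined earlier in this file, which builds the
 * command skb, appends it to hdev->cmd_q and queues cmd_work:
 *
 *	u8 scan = 0x03;	(page + inquiry scan; value illustrative)
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * hdev->cmd_cnt then throttles the queue to one outstanding command
 * until the controller's Command Complete/Status event returns the
 * credit.
 */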