blob: 5ef5221c1813fc43804b7d0e9677e792e3884f6c [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
Marcel Holtmannb78752c2010-08-08 23:06:53 -040044static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020045static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020046static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048/* HCI device list */
49LIST_HEAD(hci_dev_list);
50DEFINE_RWLOCK(hci_dev_list_lock);
51
52/* HCI callback list */
53LIST_HEAD(hci_cb_list);
54DEFINE_RWLOCK(hci_cb_list_lock);
55
Sasha Levin3df92b32012-05-27 22:36:56 +020056/* HCI ID Numbering */
57static DEFINE_IDA(hci_index_ida);
58
Marcel Holtmann899de762014-07-11 05:51:58 +020059/* ----- HCI requests ----- */
60
61#define HCI_REQ_DONE 0
62#define HCI_REQ_PEND 1
63#define HCI_REQ_CANCELED 2
64
65#define hci_req_lock(d) mutex_lock(&d->req_lock)
66#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event (register, unregister, up, down, ...) to the
 * HCI socket layer so monitoring sockets learn about device changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070075/* ---- HCI debugfs entries ---- */
76
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
Marcel Holtmann111902f2014-06-21 04:53:17 +020083 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070084 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
89static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91{
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
97 int err;
98
99 if (!test_bit(HCI_UP, &hdev->flags))
100 return -ENETDOWN;
101
102 if (copy_from_user(buf, user_buf, buf_size))
103 return -EFAULT;
104
105 buf[buf_size] = '\0';
106 if (strtobool(buf, &enable))
107 return -EINVAL;
108
Marcel Holtmann111902f2014-06-21 04:53:17 +0200109 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700110 return -EALREADY;
111
112 hci_req_lock(hdev);
113 if (enable)
114 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 HCI_CMD_TIMEOUT);
116 else
117 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
118 HCI_CMD_TIMEOUT);
119 hci_req_unlock(hdev);
120
121 if (IS_ERR(skb))
122 return PTR_ERR(skb);
123
124 err = -bt_to_errno(skb->data[0]);
125 kfree_skb(skb);
126
127 if (err < 0)
128 return err;
129
Marcel Holtmann111902f2014-06-21 04:53:17 +0200130 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700131
132 return count;
133}
134
135static const struct file_operations dut_mode_fops = {
136 .open = simple_open,
137 .read = dut_mode_read,
138 .write = dut_mode_write,
139 .llseek = default_llseek,
140};
141
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142/* ---- HCI requests ---- */
143
Johan Hedberg42c6b122013-03-05 20:37:49 +0200144static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200146 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
148 if (hdev->req_status == HCI_REQ_PEND) {
149 hdev->req_result = result;
150 hdev->req_status = HCI_REQ_DONE;
151 wake_up_interruptible(&hdev->req_wait_q);
152 }
153}
154
155static void hci_req_cancel(struct hci_dev *hdev, int err)
156{
157 BT_DBG("%s err 0x%2.2x", hdev->name, err);
158
159 if (hdev->req_status == HCI_REQ_PEND) {
160 hdev->req_result = err;
161 hdev->req_status = HCI_REQ_CANCELED;
162 wake_up_interruptible(&hdev->req_wait_q);
163 }
164}
165
Fengguang Wu77a63e02013-04-20 16:24:31 +0300166static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
167 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300168{
169 struct hci_ev_cmd_complete *ev;
170 struct hci_event_hdr *hdr;
171 struct sk_buff *skb;
172
173 hci_dev_lock(hdev);
174
175 skb = hdev->recv_evt;
176 hdev->recv_evt = NULL;
177
178 hci_dev_unlock(hdev);
179
180 if (!skb)
181 return ERR_PTR(-ENODATA);
182
183 if (skb->len < sizeof(*hdr)) {
184 BT_ERR("Too short HCI event");
185 goto failed;
186 }
187
188 hdr = (void *) skb->data;
189 skb_pull(skb, HCI_EVENT_HDR_SIZE);
190
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300191 if (event) {
192 if (hdr->evt != event)
193 goto failed;
194 return skb;
195 }
196
Johan Hedberg75e84b72013-04-02 13:35:04 +0300197 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
199 goto failed;
200 }
201
202 if (skb->len < sizeof(*ev)) {
203 BT_ERR("Too short cmd_complete event");
204 goto failed;
205 }
206
207 ev = (void *) skb->data;
208 skb_pull(skb, sizeof(*ev));
209
210 if (opcode == __le16_to_cpu(ev->opcode))
211 return skb;
212
213 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214 __le16_to_cpu(ev->opcode));
215
216failed:
217 kfree_skb(skb);
218 return ERR_PTR(-ENODATA);
219}
220
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300221struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300222 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300223{
224 DECLARE_WAITQUEUE(wait, current);
225 struct hci_request req;
226 int err = 0;
227
228 BT_DBG("%s", hdev->name);
229
230 hci_req_init(&req, hdev);
231
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300232 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300233
234 hdev->req_status = HCI_REQ_PEND;
235
Johan Hedberg75e84b72013-04-02 13:35:04 +0300236 add_wait_queue(&hdev->req_wait_q, &wait);
237 set_current_state(TASK_INTERRUPTIBLE);
238
Chan-yeol Park039fada2014-10-31 14:23:06 +0900239 err = hci_req_run(&req, hci_req_sync_complete);
240 if (err < 0) {
241 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +0200242 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900243 return ERR_PTR(err);
244 }
245
Johan Hedberg75e84b72013-04-02 13:35:04 +0300246 schedule_timeout(timeout);
247
248 remove_wait_queue(&hdev->req_wait_q, &wait);
249
250 if (signal_pending(current))
251 return ERR_PTR(-EINTR);
252
253 switch (hdev->req_status) {
254 case HCI_REQ_DONE:
255 err = -bt_to_errno(hdev->req_result);
256 break;
257
258 case HCI_REQ_CANCELED:
259 err = -hdev->req_result;
260 break;
261
262 default:
263 err = -ETIMEDOUT;
264 break;
265 }
266
267 hdev->req_status = hdev->req_result = 0;
268
269 BT_DBG("%s end: err %d", hdev->name, err);
270
271 if (err < 0)
272 return ERR_PTR(err);
273
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300274 return hci_get_cmd_complete(hdev, opcode, event);
275}
276EXPORT_SYMBOL(__hci_cmd_sync_ev);
277
278struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300279 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300280{
281 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300282}
283EXPORT_SYMBOL(__hci_cmd_sync);
284
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200286static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200287 void (*func)(struct hci_request *req,
288 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200289 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200291 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292 DECLARE_WAITQUEUE(wait, current);
293 int err = 0;
294
295 BT_DBG("%s start", hdev->name);
296
Johan Hedberg42c6b122013-03-05 20:37:49 +0200297 hci_req_init(&req, hdev);
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 hdev->req_status = HCI_REQ_PEND;
300
Johan Hedberg42c6b122013-03-05 20:37:49 +0200301 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200302
Chan-yeol Park039fada2014-10-31 14:23:06 +0900303 add_wait_queue(&hdev->req_wait_q, &wait);
304 set_current_state(TASK_INTERRUPTIBLE);
305
Johan Hedberg42c6b122013-03-05 20:37:49 +0200306 err = hci_req_run(&req, hci_req_sync_complete);
307 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200308 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300309
Chan-yeol Park039fada2014-10-31 14:23:06 +0900310 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +0200311 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900312
Andre Guedes920c8302013-03-08 11:20:15 -0300313 /* ENODATA means the HCI request command queue is empty.
314 * This can happen when a request with conditionals doesn't
315 * trigger any commands to be sent. This is normal behavior
316 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317 */
Andre Guedes920c8302013-03-08 11:20:15 -0300318 if (err == -ENODATA)
319 return 0;
320
321 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200322 }
323
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324 schedule_timeout(timeout);
325
326 remove_wait_queue(&hdev->req_wait_q, &wait);
327
328 if (signal_pending(current))
329 return -EINTR;
330
331 switch (hdev->req_status) {
332 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700333 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 break;
335
336 case HCI_REQ_CANCELED:
337 err = -hdev->req_result;
338 break;
339
340 default:
341 err = -ETIMEDOUT;
342 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
Johan Hedberga5040ef2011-01-10 13:28:59 +0200345 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346
347 BT_DBG("%s end: err %d", hdev->name, err);
348
349 return err;
350}
351
Johan Hedberg01178cd2013-03-05 20:37:41 +0200352static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200353 void (*req)(struct hci_request *req,
354 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200355 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356{
357 int ret;
358
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200359 if (!test_bit(HCI_UP, &hdev->flags))
360 return -ENETDOWN;
361
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362 /* Serialize all requests */
363 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200364 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 hci_req_unlock(hdev);
366
367 return ret;
368}
369
Johan Hedberg42c6b122013-03-05 20:37:49 +0200370static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200372 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200375 set_bit(HCI_RESET, &req->hdev->flags);
376 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377}
378
Johan Hedberg42c6b122013-03-05 20:37:49 +0200379static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200381 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200382
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200384 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200386 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200387 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200388
389 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200390 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391}
392
Johan Hedberg42c6b122013-03-05 20:37:49 +0200393static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200394{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200395 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200396
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200397 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200398 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300399
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700400 /* Read Local Supported Commands */
401 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
402
403 /* Read Local Supported Features */
404 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
405
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300406 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200407 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300408
409 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200410 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700411
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700412 /* Read Flow Control Mode */
413 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
414
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700415 /* Read Location Data */
416 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200417}
418
Johan Hedberg42c6b122013-03-05 20:37:49 +0200419static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200420{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200421 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200422
423 BT_DBG("%s %ld", hdev->name, opt);
424
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300425 /* Reset */
426 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200427 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300428
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200429 switch (hdev->dev_type) {
430 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200431 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200432 break;
433
434 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200435 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200436 break;
437
438 default:
439 BT_ERR("Unknown device type %d", hdev->dev_type);
440 break;
441 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200442}
443
Johan Hedberg42c6b122013-03-05 20:37:49 +0200444static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200445{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200446 __le16 param;
447 __u8 flt_type;
448
449 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200450 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200451
452 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200453 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200454
455 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200456 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200457
458 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200459 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200460
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700461 /* Read Number of Supported IAC */
462 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
463
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700464 /* Read Current IAC LAP */
465 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
466
Johan Hedberg2177bab2013-03-05 20:37:43 +0200467 /* Clear Event Filters */
468 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200469 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200470
471 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700472 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200473 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200474}
475
Johan Hedberg42c6b122013-03-05 20:37:49 +0200476static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200477{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300478 struct hci_dev *hdev = req->hdev;
479
Johan Hedberg2177bab2013-03-05 20:37:43 +0200480 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200481 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200482
483 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200484 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200485
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800486 /* Read LE Supported States */
487 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
488
Johan Hedberg2177bab2013-03-05 20:37:43 +0200489 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200490 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200491
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800492 /* Clear LE White List */
493 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300494
495 /* LE-only controllers have LE implicitly enabled */
496 if (!lmp_bredr_capable(hdev))
497 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200498}
499
500static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
501{
502 if (lmp_ext_inq_capable(hdev))
503 return 0x02;
504
505 if (lmp_inq_rssi_capable(hdev))
506 return 0x01;
507
508 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
509 hdev->lmp_subver == 0x0757)
510 return 0x01;
511
512 if (hdev->manufacturer == 15) {
513 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
514 return 0x01;
515 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
516 return 0x01;
517 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
518 return 0x01;
519 }
520
521 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
522 hdev->lmp_subver == 0x1805)
523 return 0x01;
524
525 return 0x00;
526}
527
Johan Hedberg42c6b122013-03-05 20:37:49 +0200528static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200529{
530 u8 mode;
531
Johan Hedberg42c6b122013-03-05 20:37:49 +0200532 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200533
Johan Hedberg42c6b122013-03-05 20:37:49 +0200534 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200535}
536
Johan Hedberg42c6b122013-03-05 20:37:49 +0200537static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200538{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200539 struct hci_dev *hdev = req->hdev;
540
Johan Hedberg2177bab2013-03-05 20:37:43 +0200541 /* The second byte is 0xff instead of 0x9f (two reserved bits
542 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
543 * command otherwise.
544 */
545 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
546
547 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
548 * any event mask for pre 1.2 devices.
549 */
550 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
551 return;
552
553 if (lmp_bredr_capable(hdev)) {
554 events[4] |= 0x01; /* Flow Specification Complete */
555 events[4] |= 0x02; /* Inquiry Result with RSSI */
556 events[4] |= 0x04; /* Read Remote Extended Features Complete */
557 events[5] |= 0x08; /* Synchronous Connection Complete */
558 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700559 } else {
560 /* Use a different default for LE-only devices */
561 memset(events, 0, sizeof(events));
562 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700563 events[1] |= 0x08; /* Read Remote Version Information Complete */
564 events[1] |= 0x20; /* Command Complete */
565 events[1] |= 0x40; /* Command Status */
566 events[1] |= 0x80; /* Hardware Error */
567 events[2] |= 0x04; /* Number of Completed Packets */
568 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200569
570 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
571 events[0] |= 0x80; /* Encryption Change */
572 events[5] |= 0x80; /* Encryption Key Refresh Complete */
573 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200574 }
575
576 if (lmp_inq_rssi_capable(hdev))
577 events[4] |= 0x02; /* Inquiry Result with RSSI */
578
579 if (lmp_sniffsubr_capable(hdev))
580 events[5] |= 0x20; /* Sniff Subrating */
581
582 if (lmp_pause_enc_capable(hdev))
583 events[5] |= 0x80; /* Encryption Key Refresh Complete */
584
585 if (lmp_ext_inq_capable(hdev))
586 events[5] |= 0x40; /* Extended Inquiry Result */
587
588 if (lmp_no_flush_capable(hdev))
589 events[7] |= 0x01; /* Enhanced Flush Complete */
590
591 if (lmp_lsto_capable(hdev))
592 events[6] |= 0x80; /* Link Supervision Timeout Changed */
593
594 if (lmp_ssp_capable(hdev)) {
595 events[6] |= 0x01; /* IO Capability Request */
596 events[6] |= 0x02; /* IO Capability Response */
597 events[6] |= 0x04; /* User Confirmation Request */
598 events[6] |= 0x08; /* User Passkey Request */
599 events[6] |= 0x10; /* Remote OOB Data Request */
600 events[6] |= 0x20; /* Simple Pairing Complete */
601 events[7] |= 0x04; /* User Passkey Notification */
602 events[7] |= 0x08; /* Keypress Notification */
603 events[7] |= 0x10; /* Remote Host Supported
604 * Features Notification
605 */
606 }
607
608 if (lmp_le_capable(hdev))
609 events[7] |= 0x20; /* LE Meta-Event */
610
Johan Hedberg42c6b122013-03-05 20:37:49 +0200611 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200612}
613
Johan Hedberg42c6b122013-03-05 20:37:49 +0200614static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200615{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200616 struct hci_dev *hdev = req->hdev;
617
Johan Hedberg2177bab2013-03-05 20:37:43 +0200618 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200619 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300620 else
621 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200622
623 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200624 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200625
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100626 /* All Bluetooth 1.2 and later controllers should support the
627 * HCI command for reading the local supported commands.
628 *
629 * Unfortunately some controllers indicate Bluetooth 1.2 support,
630 * but do not have support for this command. If that is the case,
631 * the driver can quirk the behavior and skip reading the local
632 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300633 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100634 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
635 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200636 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200637
638 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700639 /* When SSP is available, then the host features page
640 * should also be available as well. However some
641 * controllers list the max_page as 0 as long as SSP
642 * has not been enabled. To achieve proper debugging
643 * output, force the minimum max_page to 1 at least.
644 */
645 hdev->max_page = 0x01;
646
Johan Hedberg2177bab2013-03-05 20:37:43 +0200647 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
648 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200649 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
650 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200651 } else {
652 struct hci_cp_write_eir cp;
653
654 memset(hdev->eir, 0, sizeof(hdev->eir));
655 memset(&cp, 0, sizeof(cp));
656
Johan Hedberg42c6b122013-03-05 20:37:49 +0200657 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200658 }
659 }
660
661 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200662 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200663
664 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200665 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200666
667 if (lmp_ext_feat_capable(hdev)) {
668 struct hci_cp_read_local_ext_features cp;
669
670 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200671 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
672 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200673 }
674
675 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
676 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200677 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
678 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200679 }
680}
681
Johan Hedberg42c6b122013-03-05 20:37:49 +0200682static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200683{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200684 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200685 struct hci_cp_write_def_link_policy cp;
686 u16 link_policy = 0;
687
688 if (lmp_rswitch_capable(hdev))
689 link_policy |= HCI_LP_RSWITCH;
690 if (lmp_hold_capable(hdev))
691 link_policy |= HCI_LP_HOLD;
692 if (lmp_sniff_capable(hdev))
693 link_policy |= HCI_LP_SNIFF;
694 if (lmp_park_capable(hdev))
695 link_policy |= HCI_LP_PARK;
696
697 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200698 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200699}
700
Johan Hedberg42c6b122013-03-05 20:37:49 +0200701static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200702{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200703 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200704 struct hci_cp_write_le_host_supported cp;
705
Johan Hedbergc73eee92013-04-19 18:35:21 +0300706 /* LE-only devices do not support explicit enablement */
707 if (!lmp_bredr_capable(hdev))
708 return;
709
Johan Hedberg2177bab2013-03-05 20:37:43 +0200710 memset(&cp, 0, sizeof(cp));
711
712 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
713 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200714 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200715 }
716
717 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200718 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
719 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200720}
721
/* Build and queue the Set Event Mask Page 2 command.
 *
 * Starts from an all-zero mask and only unmasks events for features
 * the controller reports as supported.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
753
/* Third stage of controller initialization: event mask, stored link
 * keys, link policy, page scan parameters, the LE event mask and LE
 * feature dependent commands, and extended feature pages.
 *
 * The commands are queued on @req in a deliberate order; they are sent
 * synchronously by __hci_req_sync() from __hci_init().
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;	/* Default LE events (bits 0-3) */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
870
/* Fourth stage of controller initialization: optional features whose
 * availability is known only after the earlier stages have read the
 * supported-commands and feature bit masks.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
898
/* Run the full multi-stage controller initialization sequence.
 *
 * Stage 1 applies to every controller type; stages 2-4 only run for
 * BR/EDR/LE controllers (AMP controllers stop after stage 1). The
 * debugfs entries are created only once, during the initial HCI_SETUP
 * phase, and not on every subsequent power-on.
 *
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		hci_debugfs_create_le(hdev);
		smp_register(hdev);
	}

	return 0;
}
952
/* Stage-zero initialization used for unconfigured controllers:
 * optionally reset, then read only the minimum information (local
 * version and, when the driver can change it, the BD address) needed
 * before configuration can proceed.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
970
971static int __hci_unconf_init(struct hci_dev *hdev)
972{
973 int err;
974
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200975 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
976 return 0;
977
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200978 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
979 if (err < 0)
980 return err;
981
982 return 0;
983}
984
Johan Hedberg42c6b122013-03-05 20:37:49 +0200985static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986{
987 __u8 scan = opt;
988
Johan Hedberg42c6b122013-03-05 20:37:49 +0200989 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990
991 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200992 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993}
994
Johan Hedberg42c6b122013-03-05 20:37:49 +0200995static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996{
997 __u8 auth = opt;
998
Johan Hedberg42c6b122013-03-05 20:37:49 +0200999 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000
1001 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001002 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003}
1004
Johan Hedberg42c6b122013-03-05 20:37:49 +02001005static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006{
1007 __u8 encrypt = opt;
1008
Johan Hedberg42c6b122013-03-05 20:37:49 +02001009 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001011 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001012 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013}
1014
Johan Hedberg42c6b122013-03-05 20:37:49 +02001015static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001016{
1017 __le16 policy = cpu_to_le16(opt);
1018
Johan Hedberg42c6b122013-03-05 20:37:49 +02001019 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001020
1021 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001022 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001023}
1024
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001025/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026 * Device is held on return. */
1027struct hci_dev *hci_dev_get(int index)
1028{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001029 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030
1031 BT_DBG("%d", index);
1032
1033 if (index < 0)
1034 return NULL;
1035
1036 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001037 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 if (d->id == index) {
1039 hdev = hci_dev_hold(d);
1040 break;
1041 }
1042 }
1043 read_unlock(&hci_dev_list_lock);
1044 return hdev;
1045}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046
1047/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001048
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001049bool hci_discovery_active(struct hci_dev *hdev)
1050{
1051 struct discovery_state *discov = &hdev->discovery;
1052
Andre Guedes6fbe1952012-02-03 17:47:58 -03001053 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001054 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001055 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001056 return true;
1057
Andre Guedes6fbe1952012-02-03 17:47:58 -03001058 default:
1059 return false;
1060 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001061}
1062
/* Transition the discovery state machine and notify the management
 * interface when discovery effectively starts or stops.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No transition, nothing to update or report */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* A STARTING -> STOPPED transition means discovery never
		 * actually ran, so it was never reported as started and
		 * must not be reported as stopped either.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1092
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001093void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094{
Johan Hedberg30883512012-01-04 14:16:21 +02001095 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001096 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097
Johan Hedberg561aafb2012-01-04 13:31:59 +02001098 list_for_each_entry_safe(p, n, &cache->all, all) {
1099 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001100 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001102
1103 INIT_LIST_HEAD(&cache->unknown);
1104 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105}
1106
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001107struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1108 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109{
Johan Hedberg30883512012-01-04 14:16:21 +02001110 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 struct inquiry_entry *e;
1112
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001113 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114
Johan Hedberg561aafb2012-01-04 13:31:59 +02001115 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001117 return e;
1118 }
1119
1120 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121}
1122
Johan Hedberg561aafb2012-01-04 13:31:59 +02001123struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001124 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001125{
Johan Hedberg30883512012-01-04 14:16:21 +02001126 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001127 struct inquiry_entry *e;
1128
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001129 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001130
1131 list_for_each_entry(e, &cache->unknown, list) {
1132 if (!bacmp(&e->data.bdaddr, bdaddr))
1133 return e;
1134 }
1135
1136 return NULL;
1137}
1138
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001139struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001140 bdaddr_t *bdaddr,
1141 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001142{
1143 struct discovery_state *cache = &hdev->discovery;
1144 struct inquiry_entry *e;
1145
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001146 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001147
1148 list_for_each_entry(e, &cache->resolve, list) {
1149 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1150 return e;
1151 if (!bacmp(&e->data.bdaddr, bdaddr))
1152 return e;
1153 }
1154
1155 return NULL;
1156}
1157
/* Re-insert @ie into the resolve sub-list, keeping that list sorted
 * by ascending |RSSI| so that name resolution is attempted for the
 * strongest (closest) devices first. Entries whose name request is
 * already pending are not displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with a weaker or equal
	 * signal; @pos tracks the last node ie should go after.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1176
/* Add a new inquiry result to the cache or refresh an existing entry.
 *
 * Returns a bitmask of MGMT_DEV_FOUND_* flags telling the management
 * core how to report the device (e.g. whether name confirmation is
 * required or legacy pairing applies).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates previously stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* Keep the resolve list ordered when the RSSI of an
		 * entry awaiting name resolution changes.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN unless a name request is still pending */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1238
1239static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1240{
Johan Hedberg30883512012-01-04 14:16:21 +02001241 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 struct inquiry_info *info = (struct inquiry_info *) buf;
1243 struct inquiry_entry *e;
1244 int copied = 0;
1245
Johan Hedberg561aafb2012-01-04 13:31:59 +02001246 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001248
1249 if (copied >= num)
1250 break;
1251
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 bacpy(&info->bdaddr, &data->bdaddr);
1253 info->pscan_rep_mode = data->pscan_rep_mode;
1254 info->pscan_period_mode = data->pscan_period_mode;
1255 info->pscan_mode = data->pscan_mode;
1256 memcpy(info->dev_class, data->dev_class, 3);
1257 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001258
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001260 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 }
1262
1263 BT_DBG("cache %p, copied %d", cache, copied);
1264 return copied;
1265}
1266
Johan Hedberg42c6b122013-03-05 20:37:49 +02001267static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268{
1269 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001270 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 struct hci_cp_inquiry cp;
1272
1273 BT_DBG("%s", hdev->name);
1274
1275 if (test_bit(HCI_INQUIRY, &hdev->flags))
1276 return;
1277
1278 /* Start Inquiry */
1279 memcpy(&cp.lap, &ir->lap, 3);
1280 cp.length = ir->length;
1281 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001282 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283}
1284
1285int hci_inquiry(void __user *arg)
1286{
1287 __u8 __user *ptr = arg;
1288 struct hci_inquiry_req ir;
1289 struct hci_dev *hdev;
1290 int err = 0, do_inquiry = 0, max_rsp;
1291 long timeo;
1292 __u8 *buf;
1293
1294 if (copy_from_user(&ir, ptr, sizeof(ir)))
1295 return -EFAULT;
1296
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001297 hdev = hci_dev_get(ir.dev_id);
1298 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 return -ENODEV;
1300
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001301 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1302 err = -EBUSY;
1303 goto done;
1304 }
1305
Marcel Holtmann4a964402014-07-02 19:10:33 +02001306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001307 err = -EOPNOTSUPP;
1308 goto done;
1309 }
1310
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001311 if (hdev->dev_type != HCI_BREDR) {
1312 err = -EOPNOTSUPP;
1313 goto done;
1314 }
1315
Johan Hedberg56f87902013-10-02 13:43:13 +03001316 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1317 err = -EOPNOTSUPP;
1318 goto done;
1319 }
1320
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001321 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001322 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001323 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001324 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 do_inquiry = 1;
1326 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001327 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
Marcel Holtmann04837f62006-07-03 10:02:33 +02001329 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001330
1331 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001332 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1333 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001334 if (err < 0)
1335 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001336
1337 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1338 * cleared). If it is interrupted by a signal, return -EINTR.
1339 */
NeilBrown74316202014-07-07 15:16:04 +10001340 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001341 TASK_INTERRUPTIBLE))
1342 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001345 /* for unlimited number of responses we will use buffer with
1346 * 255 entries
1347 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1349
1350 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1351 * copy it to the user space.
1352 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001353 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001354 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 err = -ENOMEM;
1356 goto done;
1357 }
1358
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001359 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001361 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362
1363 BT_DBG("num_rsp %d", ir.num_rsp);
1364
1365 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1366 ptr += sizeof(ir);
1367 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001368 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001370 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 err = -EFAULT;
1372
1373 kfree(buf);
1374
1375done:
1376 hci_dev_put(hdev);
1377 return err;
1378}
1379
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001380static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 int ret = 0;
1383
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 BT_DBG("%s %p", hdev->name, hdev);
1385
1386 hci_req_lock(hdev);
1387
Johan Hovold94324962012-03-15 14:48:41 +01001388 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1389 ret = -ENODEV;
1390 goto done;
1391 }
1392
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02001393 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1394 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001395 /* Check for rfkill but allow the HCI setup stage to
1396 * proceed (which in itself doesn't cause any RF activity).
1397 */
1398 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1399 ret = -ERFKILL;
1400 goto done;
1401 }
1402
1403 /* Check for valid public address or a configured static
1404 * random adddress, but let the HCI setup proceed to
1405 * be able to determine if there is a public address
1406 * or not.
1407 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001408 * In case of user channel usage, it is not important
1409 * if a public address or static random address is
1410 * available.
1411 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001412 * This check is only valid for BR/EDR controllers
1413 * since AMP controllers do not have an address.
1414 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001415 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1416 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001417 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1418 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1419 ret = -EADDRNOTAVAIL;
1420 goto done;
1421 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001422 }
1423
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 if (test_bit(HCI_UP, &hdev->flags)) {
1425 ret = -EALREADY;
1426 goto done;
1427 }
1428
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 if (hdev->open(hdev)) {
1430 ret = -EIO;
1431 goto done;
1432 }
1433
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001434 atomic_set(&hdev->cmd_cnt, 1);
1435 set_bit(HCI_INIT, &hdev->flags);
1436
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001437 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1438 if (hdev->setup)
1439 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001440
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001441 /* The transport driver can set these quirks before
1442 * creating the HCI device or in its setup callback.
1443 *
1444 * In case any of them is set, the controller has to
1445 * start up as unconfigured.
1446 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001447 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1448 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001449 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001450
1451 /* For an unconfigured controller it is required to
1452 * read at least the version information provided by
1453 * the Read Local Version Information command.
1454 *
1455 * If the set_bdaddr driver callback is provided, then
1456 * also the original Bluetooth public device address
1457 * will be read using the Read BD Address command.
1458 */
1459 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1460 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001461 }
1462
Marcel Holtmann9713c172014-07-06 12:11:15 +02001463 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1464 /* If public address change is configured, ensure that
1465 * the address gets programmed. If the driver does not
1466 * support changing the public address, fail the power
1467 * on procedure.
1468 */
1469 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1470 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001471 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1472 else
1473 ret = -EADDRNOTAVAIL;
1474 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001475
1476 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02001477 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001478 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001479 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 }
1481
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001482 clear_bit(HCI_INIT, &hdev->flags);
1483
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 if (!ret) {
1485 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02001486 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 set_bit(HCI_UP, &hdev->flags);
1488 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001489 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02001490 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02001491 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001492 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001493 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001494 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001495 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001496 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001497 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001498 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001500 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001501 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001502 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503
1504 skb_queue_purge(&hdev->cmd_q);
1505 skb_queue_purge(&hdev->rx_q);
1506
1507 if (hdev->flush)
1508 hdev->flush(hdev);
1509
1510 if (hdev->sent_cmd) {
1511 kfree_skb(hdev->sent_cmd);
1512 hdev->sent_cmd = NULL;
1513 }
1514
1515 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001516 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 }
1518
1519done:
1520 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 return ret;
1522}
1523
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001524/* ---- HCI ioctl helpers ---- */
1525
1526int hci_dev_open(__u16 dev)
1527{
1528 struct hci_dev *hdev;
1529 int err;
1530
1531 hdev = hci_dev_get(dev);
1532 if (!hdev)
1533 return -ENODEV;
1534
Marcel Holtmann4a964402014-07-02 19:10:33 +02001535 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001536 * up as user channel. Trying to bring them up as normal devices
1537 * will result into a failure. Only user channel operation is
1538 * possible.
1539 *
1540 * When this function is called for a user channel, the flag
1541 * HCI_USER_CHANNEL will be set first before attempting to
1542 * open the device.
1543 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02001544 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001545 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1546 err = -EOPNOTSUPP;
1547 goto done;
1548 }
1549
Johan Hedberge1d08f42013-10-01 22:44:50 +03001550 /* We need to ensure that no other power on/off work is pending
1551 * before proceeding to call hci_dev_do_open. This is
1552 * particularly important if the setup procedure has not yet
1553 * completed.
1554 */
1555 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1556 cancel_delayed_work(&hdev->power_off);
1557
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001558 /* After this call it is guaranteed that the setup procedure
1559 * has finished. This means that error conditions like RFKILL
1560 * or no valid public or static random address apply.
1561 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001562 flush_workqueue(hdev->req_workqueue);
1563
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001564 /* For controllers not using the management interface and that
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001565 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001566 * so that pairing works for them. Once the management interface
1567 * is in use this bit will be cleared again and userspace has
1568 * to explicitly enable it.
1569 */
1570 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1571 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001572 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001573
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001574 err = hci_dev_do_open(hdev);
1575
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001576done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001577 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001578 return err;
1579}
1580
Johan Hedbergd7347f32014-07-04 12:37:23 +03001581/* This function requires the caller holds hdev->lock */
1582static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1583{
1584 struct hci_conn_params *p;
1585
Johan Hedbergf161dd42014-08-15 21:06:54 +03001586 list_for_each_entry(p, &hdev->le_conn_params, list) {
1587 if (p->conn) {
1588 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001589 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001590 p->conn = NULL;
1591 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001592 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001593 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001594
1595 BT_DBG("All LE pending actions cleared");
1596}
1597
/* Power down a controller: stop all pending work, flush queues and
 * caches, optionally send HCI Reset, and close the transport.
 *
 * The teardown order matters: delayed work is cancelled and the RX/TX
 * work flushed before the device lock is taken, the workqueue drained
 * before the *_flush() calls, and the command queue purged before the
 * optional reset request is queued.
 *
 * Returns 0 (also when the device was already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: only make sure the command timer is stopped. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout is obsolete once powered off;
	 * clear the discoverable state along with it.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* RPA expiry work is only scheduled under mgmt control. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	/* Only report powered-off to mgmt when this was not an auto-off
	 * transition, and only for BR/EDR controllers.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1703
1704int hci_dev_close(__u16 dev)
1705{
1706 struct hci_dev *hdev;
1707 int err;
1708
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001709 hdev = hci_dev_get(dev);
1710 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001712
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001713 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1714 err = -EBUSY;
1715 goto done;
1716 }
1717
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001718 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1719 cancel_delayed_work(&hdev->power_off);
1720
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001723done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 hci_dev_put(hdev);
1725 return err;
1726}
1727
1728int hci_dev_reset(__u16 dev)
1729{
1730 struct hci_dev *hdev;
1731 int ret = 0;
1732
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001733 hdev = hci_dev_get(dev);
1734 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 return -ENODEV;
1736
1737 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Marcel Holtmann808a0492013-08-26 20:57:58 -07001739 if (!test_bit(HCI_UP, &hdev->flags)) {
1740 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001744 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1745 ret = -EBUSY;
1746 goto done;
1747 }
1748
Marcel Holtmann4a964402014-07-02 19:10:33 +02001749 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001750 ret = -EOPNOTSUPP;
1751 goto done;
1752 }
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 /* Drop queues */
1755 skb_queue_purge(&hdev->rx_q);
1756 skb_queue_purge(&hdev->cmd_q);
1757
Johan Hedberg76727c02014-11-18 09:00:14 +02001758 /* Avoid potential lockdep warnings from the *_flush() calls by
1759 * ensuring the workqueue is empty up front.
1760 */
1761 drain_workqueue(hdev->workqueue);
1762
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001763 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001764 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001766 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
1768 if (hdev->flush)
1769 hdev->flush(hdev);
1770
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001771 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001772 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001774 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
1776done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 hci_req_unlock(hdev);
1778 hci_dev_put(hdev);
1779 return ret;
1780}
1781
1782int hci_dev_reset_stat(__u16 dev)
1783{
1784 struct hci_dev *hdev;
1785 int ret = 0;
1786
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001787 hdev = hci_dev_get(dev);
1788 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 return -ENODEV;
1790
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001791 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1792 ret = -EBUSY;
1793 goto done;
1794 }
1795
Marcel Holtmann4a964402014-07-02 19:10:33 +02001796 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001797 ret = -EOPNOTSUPP;
1798 goto done;
1799 }
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1802
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001803done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 return ret;
1806}
1807
Johan Hedberg123abc02014-07-10 12:09:07 +03001808static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1809{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001810 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001811
1812 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1813
1814 if ((scan & SCAN_PAGE))
1815 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1816 &hdev->dev_flags);
1817 else
1818 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1819 &hdev->dev_flags);
1820
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001821 if ((scan & SCAN_INQUIRY)) {
1822 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1823 &hdev->dev_flags);
1824 } else {
1825 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1826 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1827 &hdev->dev_flags);
1828 }
1829
Johan Hedberg123abc02014-07-10 12:09:07 +03001830 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1831 return;
1832
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001833 if (conn_changed || discov_changed) {
1834 /* In case this was disabled through mgmt */
1835 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1836
1837 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1838 mgmt_update_adv_data(hdev);
1839
Johan Hedberg123abc02014-07-10 12:09:07 +03001840 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001841 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001842}
1843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844int hci_dev_cmd(unsigned int cmd, void __user *arg)
1845{
1846 struct hci_dev *hdev;
1847 struct hci_dev_req dr;
1848 int err = 0;
1849
1850 if (copy_from_user(&dr, arg, sizeof(dr)))
1851 return -EFAULT;
1852
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001853 hdev = hci_dev_get(dr.dev_id);
1854 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 return -ENODEV;
1856
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001857 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1858 err = -EBUSY;
1859 goto done;
1860 }
1861
Marcel Holtmann4a964402014-07-02 19:10:33 +02001862 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001863 err = -EOPNOTSUPP;
1864 goto done;
1865 }
1866
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001867 if (hdev->dev_type != HCI_BREDR) {
1868 err = -EOPNOTSUPP;
1869 goto done;
1870 }
1871
Johan Hedberg56f87902013-10-02 13:43:13 +03001872 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1873 err = -EOPNOTSUPP;
1874 goto done;
1875 }
1876
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 switch (cmd) {
1878 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001879 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1880 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 break;
1882
1883 case HCISETENCRYPT:
1884 if (!lmp_encrypt_capable(hdev)) {
1885 err = -EOPNOTSUPP;
1886 break;
1887 }
1888
1889 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1890 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001891 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1892 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 if (err)
1894 break;
1895 }
1896
Johan Hedberg01178cd2013-03-05 20:37:41 +02001897 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1898 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 break;
1900
1901 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001902 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1903 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001904
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001905 /* Ensure that the connectable and discoverable states
1906 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001907 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001908 if (!err)
1909 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 break;
1911
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001912 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001913 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1914 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001915 break;
1916
1917 case HCISETLINKMODE:
1918 hdev->link_mode = ((__u16) dr.dev_opt) &
1919 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1920 break;
1921
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 case HCISETPTYPE:
1923 hdev->pkt_type = (__u16) dr.dev_opt;
1924 break;
1925
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001927 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1928 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 break;
1930
1931 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001932 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1933 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 break;
1935
1936 default:
1937 err = -EINVAL;
1938 break;
1939 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001940
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001941done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 hci_dev_put(hdev);
1943 return err;
1944}
1945
1946int hci_get_dev_list(void __user *arg)
1947{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001948 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 struct hci_dev_list_req *dl;
1950 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 int n = 0, size, err;
1952 __u16 dev_num;
1953
1954 if (get_user(dev_num, (__u16 __user *) arg))
1955 return -EFAULT;
1956
1957 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1958 return -EINVAL;
1959
1960 size = sizeof(*dl) + dev_num * sizeof(*dr);
1961
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001962 dl = kzalloc(size, GFP_KERNEL);
1963 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 return -ENOMEM;
1965
1966 dr = dl->dev_req;
1967
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001968 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001969 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001970 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001971
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001972 /* When the auto-off is configured it means the transport
1973 * is running, but in that case still indicate that the
1974 * device is actually down.
1975 */
1976 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1977 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001980 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001981
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 if (++n >= dev_num)
1983 break;
1984 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001985 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987 dl->dev_num = n;
1988 size = sizeof(*dl) + n * sizeof(*dr);
1989
1990 err = copy_to_user(arg, dl, size);
1991 kfree(dl);
1992
1993 return err ? -EFAULT : 0;
1994}
1995
1996int hci_get_dev_info(void __user *arg)
1997{
1998 struct hci_dev *hdev;
1999 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002000 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 int err = 0;
2002
2003 if (copy_from_user(&di, arg, sizeof(di)))
2004 return -EFAULT;
2005
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002006 hdev = hci_dev_get(di.dev_id);
2007 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 return -ENODEV;
2009
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002010 /* When the auto-off is configured it means the transport
2011 * is running, but in that case still indicate that the
2012 * device is actually down.
2013 */
2014 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2015 flags = hdev->flags & ~BIT(HCI_UP);
2016 else
2017 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 strcpy(di.name, hdev->name);
2020 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002021 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002022 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002024 if (lmp_bredr_capable(hdev)) {
2025 di.acl_mtu = hdev->acl_mtu;
2026 di.acl_pkts = hdev->acl_pkts;
2027 di.sco_mtu = hdev->sco_mtu;
2028 di.sco_pkts = hdev->sco_pkts;
2029 } else {
2030 di.acl_mtu = hdev->le_mtu;
2031 di.acl_pkts = hdev->le_pkts;
2032 di.sco_mtu = 0;
2033 di.sco_pkts = 0;
2034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 di.link_policy = hdev->link_policy;
2036 di.link_mode = hdev->link_mode;
2037
2038 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2039 memcpy(&di.features, &hdev->features, sizeof(di.features));
2040
2041 if (copy_to_user(arg, &di, sizeof(di)))
2042 err = -EFAULT;
2043
2044 hci_dev_put(hdev);
2045
2046 return err;
2047}
2048
2049/* ---- Interface to HCI drivers ---- */
2050
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002051static int hci_rfkill_set_block(void *data, bool blocked)
2052{
2053 struct hci_dev *hdev = data;
2054
2055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2056
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002057 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2058 return -EBUSY;
2059
Johan Hedberg5e130362013-09-13 08:58:17 +03002060 if (blocked) {
2061 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002062 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2063 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002064 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002065 } else {
2066 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002067 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002068
2069 return 0;
2070}
2071
2072static const struct rfkill_ops hci_rfkill_ops = {
2073 .set_block = hci_rfkill_set_block,
2074};
2075
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002076static void hci_power_on(struct work_struct *work)
2077{
2078 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002079 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002080
2081 BT_DBG("%s", hdev->name);
2082
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002083 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002084 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302085 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002086 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302087 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002088 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002089 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002090
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002091 /* During the HCI setup phase, a few error conditions are
2092 * ignored and they need to be checked now. If they are still
2093 * valid, it is important to turn the device back off.
2094 */
2095 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002096 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002097 (hdev->dev_type == HCI_BREDR &&
2098 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2099 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002100 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2101 hci_dev_do_close(hdev);
2102 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002103 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2104 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002105 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002106
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002107 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002108 /* For unconfigured devices, set the HCI_RAW flag
2109 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002110 */
2111 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2112 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002113
2114 /* For fully configured devices, this will send
2115 * the Index Added event. For unconfigured devices,
2116 * it will send Unconfigued Index Added event.
2117 *
2118 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2119 * and no event will be send.
2120 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002121 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002122 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002123 /* When the controller is now configured, then it
2124 * is important to clear the HCI_RAW flag.
2125 */
2126 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2127 clear_bit(HCI_RAW, &hdev->flags);
2128
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002129 /* Powering on the controller with HCI_CONFIG set only
2130 * happens with the transition from unconfigured to
2131 * configured. This will send the Index Added event.
2132 */
2133 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002134 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002135}
2136
2137static void hci_power_off(struct work_struct *work)
2138{
Johan Hedberg32435532011-11-07 22:16:04 +02002139 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002140 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002141
2142 BT_DBG("%s", hdev->name);
2143
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002144 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002145}
2146
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002147static void hci_discov_off(struct work_struct *work)
2148{
2149 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002150
2151 hdev = container_of(work, struct hci_dev, discov_off.work);
2152
2153 BT_DBG("%s", hdev->name);
2154
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002155 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002156}
2157
Johan Hedberg35f74982014-02-18 17:14:32 +02002158void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002159{
Johan Hedberg48210022013-01-27 00:31:28 +02002160 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002161
Johan Hedberg48210022013-01-27 00:31:28 +02002162 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2163 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002164 kfree(uuid);
2165 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002166}
2167
Johan Hedberg35f74982014-02-18 17:14:32 +02002168void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002169{
Johan Hedberg0378b592014-11-19 15:22:22 +02002170 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002171
Johan Hedberg0378b592014-11-19 15:22:22 +02002172 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2173 list_del_rcu(&key->list);
2174 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002175 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002176}
2177
Johan Hedberg35f74982014-02-18 17:14:32 +02002178void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002179{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002180 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002181
Johan Hedberg970d0f12014-11-13 14:37:47 +02002182 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2183 list_del_rcu(&k->list);
2184 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002185 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002186}
2187
/* Remove and free all stored SMP Identity Resolving Keys.
 *
 * Same RCU-safe teardown as hci_link_keys_clear(): list_del_rcu() plus
 * kfree_rcu() keeps concurrent readers (the hci_find_irk_* helpers)
 * safe while the list is emptied.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2197
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002198struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2199{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002200 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002201
Johan Hedberg0378b592014-11-19 15:22:22 +02002202 rcu_read_lock();
2203 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2204 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2205 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002206 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002207 }
2208 }
2209 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002210
2211 return NULL;
2212}
2213
/* Decide whether a BR/EDR link key should be stored persistently.
 *
 * @conn may be NULL (security mode 3: link-level security enforced
 * during connection setup, no connection object yet). The checks form
 * an ordered decision ladder - earlier rules win.
 *
 * Returns true if the key is worth keeping across disconnects.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2253
Johan Hedberge804d252014-07-16 11:42:28 +03002254static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002255{
Johan Hedberge804d252014-07-16 11:42:28 +03002256 if (type == SMP_LTK)
2257 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002258
Johan Hedberge804d252014-07-16 11:42:28 +03002259 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002260}
2261
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002262struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2263 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002264{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002265 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002266
Johan Hedberg970d0f12014-11-13 14:37:47 +02002267 rcu_read_lock();
2268 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002269 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2270 continue;
2271
Johan Hedberg923e2412014-12-03 12:43:39 +02002272 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002273 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002274 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002275 }
2276 }
2277 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002278
2279 return NULL;
2280}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002281
/* Find the IRK whose owner generated the given Resolvable Private
 * Address (RPA).
 *
 * Fast path: an entry whose cached ->rpa already equals @rpa. Slow
 * path: run RPA resolution (smp_irk_matches()) against every stored
 * IRK value; on a hit the ->rpa cache is refreshed before returning.
 *
 * NOTE(review): irk->rpa is written while holding only the RCU read
 * lock - presumably updaters are serialized by hdev->lock in the
 * callers; confirm before relying on this elsewhere.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2305
2306struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2307 u8 addr_type)
2308{
2309 struct smp_irk *irk;
2310
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002311 /* Identity Address must be public or static random */
2312 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2313 return NULL;
2314
Johan Hedbergadae20c2014-11-13 14:37:48 +02002315 rcu_read_lock();
2316 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002317 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002318 bacmp(bdaddr, &irk->bdaddr) == 0) {
2319 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002320 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002321 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002322 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002323 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002324
2325 return NULL;
2326}
2327
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (e.g. a key notification arriving without a
 * connection object). If @persistent is non-NULL it is set to whether
 * the key should be stored permanently (see hci_persistent_key()).
 *
 * Returns the new or updated key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the type of the key it
	 * replaced; any other type is stored as-is. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2374
Johan Hedbergca9142b2014-02-19 14:57:44 +02002375struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002376 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002377 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002378{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002379 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002380 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002381
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002382 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002383 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002384 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002385 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002386 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002387 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002388 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002389 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002390 }
2391
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002392 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002393 key->bdaddr_type = addr_type;
2394 memcpy(key->val, tk, sizeof(key->val));
2395 key->authenticated = authenticated;
2396 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002397 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002398 key->enc_size = enc_size;
2399 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002400
Johan Hedbergca9142b2014-02-19 14:57:44 +02002401 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002402}
2403
Johan Hedbergca9142b2014-02-19 14:57:44 +02002404struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002406{
2407 struct smp_irk *irk;
2408
2409 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2410 if (!irk) {
2411 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2412 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002413 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002414
2415 bacpy(&irk->bdaddr, bdaddr);
2416 irk->addr_type = addr_type;
2417
Johan Hedbergadae20c2014-11-13 14:37:48 +02002418 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002419 }
2420
2421 memcpy(irk->val, val, 16);
2422 bacpy(&irk->rpa, rpa);
2423
Johan Hedbergca9142b2014-02-19 14:57:44 +02002424 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002425}
2426
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002427int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2428{
2429 struct link_key *key;
2430
2431 key = hci_find_link_key(hdev, bdaddr);
2432 if (!key)
2433 return -ENOENT;
2434
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002435 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002436
Johan Hedberg0378b592014-11-19 15:22:22 +02002437 list_del_rcu(&key->list);
2438 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002439
2440 return 0;
2441}
2442
/* Delete all Long Term Keys stored for @bdaddr/@bdaddr_type.
 *
 * Multiple entries can match (e.g. one per role), hence the full-list
 * walk with a removal counter. Entries are freed via kfree_rcu() so
 * concurrent RCU readers remain safe; list_del_rcu() keeps the removed
 * node's forward pointer, so the non-_safe iterator may continue.
 *
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2461
/* Delete all Identity Resolving Keys stored for @bdaddr/@addr_type.
 *
 * Entries are freed via kfree_rcu() so concurrent RCU readers remain
 * safe; list_del_rcu() keeps the removed node's forward pointer, so
 * the non-_safe iterator may continue. Silently does nothing when no
 * entry matches.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2476
Ville Tervo6bd32322011-02-16 16:32:41 +02002477/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002478static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002479{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002480 struct hci_dev *hdev = container_of(work, struct hci_dev,
2481 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002482
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002483 if (hdev->sent_cmd) {
2484 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2485 u16 opcode = __le16_to_cpu(sent->opcode);
2486
2487 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2488 } else {
2489 BT_ERR("%s command tx timeout", hdev->name);
2490 }
2491
Ville Tervo6bd32322011-02-16 16:32:41 +02002492 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002493 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002494}
2495
Szymon Janc2763eda2011-03-22 13:12:22 +01002496struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002497 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002498{
2499 struct oob_data *data;
2500
Johan Hedberg6928a922014-10-26 20:46:09 +01002501 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2502 if (bacmp(bdaddr, &data->bdaddr) != 0)
2503 continue;
2504 if (data->bdaddr_type != bdaddr_type)
2505 continue;
2506 return data;
2507 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002508
2509 return NULL;
2510}
2511
Johan Hedberg6928a922014-10-26 20:46:09 +01002512int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2513 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002514{
2515 struct oob_data *data;
2516
Johan Hedberg6928a922014-10-26 20:46:09 +01002517 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002518 if (!data)
2519 return -ENOENT;
2520
Johan Hedberg6928a922014-10-26 20:46:09 +01002521 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002522
2523 list_del(&data->list);
2524 kfree(data);
2525
2526 return 0;
2527}
2528
Johan Hedberg35f74982014-02-18 17:14:32 +02002529void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002530{
2531 struct oob_data *data, *n;
2532
2533 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2534 list_del(&data->list);
2535 kfree(data);
2536 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002537}
2538
/* Store remote Out-Of-Band pairing data for @bdaddr/@bdaddr_type.
 *
 * Either the P-192 pair (hash192/rand192) or the P-256 pair
 * (hash256/rand256) may be NULL; absent values are zeroed so stale
 * data from an earlier exchange cannot survive in a reused entry.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* Both halves of a pair must be present; otherwise wipe them */
	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002577struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002578 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002579{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002580 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002581
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002582 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002583 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002584 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002585 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002586
2587 return NULL;
2588}
2589
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002590void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002591{
2592 struct list_head *p, *n;
2593
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002594 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002596
2597 list_del(p);
2598 kfree(b);
2599 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002600}
2601
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002602int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002603{
2604 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002605
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002606 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002607 return -EBADF;
2608
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002609 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002610 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002611
Johan Hedberg27f70f32014-07-21 10:50:06 +03002612 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002613 if (!entry)
2614 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002615
2616 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002617 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002618
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002619 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002620
2621 return 0;
2622}
2623
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002624int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002625{
2626 struct bdaddr_list *entry;
2627
Johan Hedberg35f74982014-02-18 17:14:32 +02002628 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002629 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002630 return 0;
2631 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002632
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002633 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002634 if (!entry)
2635 return -ENOENT;
2636
2637 list_del(&entry->list);
2638 kfree(entry);
2639
2640 return 0;
2641}
2642
Andre Guedes15819a72014-02-03 13:56:18 -03002643/* This function requires the caller holds hdev->lock */
2644struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2645 bdaddr_t *addr, u8 addr_type)
2646{
2647 struct hci_conn_params *params;
2648
Johan Hedberg738f6182014-07-03 19:33:51 +03002649 /* The conn params list only contains identity addresses */
2650 if (!hci_is_identity_address(addr, addr_type))
2651 return NULL;
2652
Andre Guedes15819a72014-02-03 13:56:18 -03002653 list_for_each_entry(params, &hdev->le_conn_params, list) {
2654 if (bacmp(&params->addr, addr) == 0 &&
2655 params->addr_type == addr_type) {
2656 return params;
2657 }
2658 }
2659
2660 return NULL;
2661}
2662
2663/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002664struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2665 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002666{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002667 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002668
Johan Hedberg738f6182014-07-03 19:33:51 +03002669 /* The list only contains identity addresses */
2670 if (!hci_is_identity_address(addr, addr_type))
2671 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002672
Johan Hedberg501f8822014-07-04 12:37:26 +03002673 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002674 if (bacmp(&param->addr, addr) == 0 &&
2675 param->addr_type == addr_type)
2676 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002677 }
2678
2679 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002680}
2681
2682/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002683struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2684 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002685{
2686 struct hci_conn_params *params;
2687
Johan Hedbergc46245b2014-07-02 17:37:33 +03002688 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002689 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03002690
Andre Guedes15819a72014-02-03 13:56:18 -03002691 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002692 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002693 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002694
2695 params = kzalloc(sizeof(*params), GFP_KERNEL);
2696 if (!params) {
2697 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002698 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002699 }
2700
2701 bacpy(&params->addr, addr);
2702 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002703
2704 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002705 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002706
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002707 params->conn_min_interval = hdev->le_conn_min_interval;
2708 params->conn_max_interval = hdev->le_conn_max_interval;
2709 params->conn_latency = hdev->le_conn_latency;
2710 params->supervision_timeout = hdev->le_supv_timeout;
2711 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2712
2713 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2714
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002715 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002716}
2717
Johan Hedbergf6c63242014-08-15 21:06:59 +03002718static void hci_conn_params_free(struct hci_conn_params *params)
2719{
2720 if (params->conn) {
2721 hci_conn_drop(params->conn);
2722 hci_conn_put(params->conn);
2723 }
2724
2725 list_del(&params->action);
2726 list_del(&params->list);
2727 kfree(params);
2728}
2729
Andre Guedes15819a72014-02-03 13:56:18 -03002730/* This function requires the caller holds hdev->lock */
2731void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2732{
2733 struct hci_conn_params *params;
2734
2735 params = hci_conn_params_lookup(hdev, addr, addr_type);
2736 if (!params)
2737 return;
2738
Johan Hedbergf6c63242014-08-15 21:06:59 +03002739 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002740
Johan Hedberg95305ba2014-07-04 12:37:21 +03002741 hci_update_background_scan(hdev);
2742
Andre Guedes15819a72014-02-03 13:56:18 -03002743 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2744}
2745
2746/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03002747void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002748{
2749 struct hci_conn_params *params, *tmp;
2750
2751 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03002752 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2753 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03002754 list_del(&params->list);
2755 kfree(params);
2756 }
2757
Johan Hedberg55af49a82014-07-02 17:37:26 +03002758 BT_DBG("All LE disabled connection parameters were removed");
2759}
2760
2761/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03002762void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002763{
2764 struct hci_conn_params *params, *tmp;
2765
Johan Hedbergf6c63242014-08-15 21:06:59 +03002766 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2767 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002768
Johan Hedberga2f41a82014-07-04 12:37:19 +03002769 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02002770
Andre Guedes15819a72014-02-03 13:56:18 -03002771 BT_DBG("All LE connection parameters were removed");
2772}
2773
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002774static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002775{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002776 if (status) {
2777 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002778
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002779 hci_dev_lock(hdev);
2780 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2781 hci_dev_unlock(hdev);
2782 return;
2783 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002784}
2785
/* Request-complete callback for the LE-scan-disable request issued by
 * le_scan_disable_work().
 *
 * For LE-only discovery the discovery state machine simply stops. For
 * interleaved discovery the BR/EDR phase is started next: a General
 * Inquiry (GIAC) request is built and submitted with
 * inquiry_complete() as its completion handler.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop cached results before the BR/EDR inquiry phase */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2828
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002829static void le_scan_disable_work(struct work_struct *work)
2830{
2831 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002832 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002833 struct hci_request req;
2834 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002835
2836 BT_DBG("%s", hdev->name);
2837
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002838 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002839
Andre Guedesb1efcc22014-02-26 20:21:40 -03002840 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002841
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002842 err = hci_req_run(&req, le_scan_disable_work_complete);
2843 if (err)
2844 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002845}
2846
Johan Hedberga1f4c312014-02-27 14:05:41 +02002847/* Copy the Identity Address of the controller.
2848 *
2849 * If the controller has a public BD_ADDR, then by default use that one.
2850 * If this is a LE only controller without a public address, default to
2851 * the static random address.
2852 *
2853 * For debugging purposes it is possible to force controllers with a
2854 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002855 *
2856 * In case BR/EDR has been disabled on a dual-mode controller and
2857 * userspace has configured a static address, then that address
2858 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002859 */
2860void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2861 u8 *bdaddr_type)
2862{
Marcel Holtmann111902f2014-06-21 04:53:17 +02002863 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002864 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2865 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2866 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002867 bacpy(bdaddr, &hdev->static_addr);
2868 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2869 } else {
2870 bacpy(bdaddr, &hdev->bdaddr);
2871 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2872 }
2873}
2874
David Herrmann9be0dab2012-04-22 14:39:57 +02002875/* Alloc HCI device */
2876struct hci_dev *hci_alloc_dev(void)
2877{
2878 struct hci_dev *hdev;
2879
Johan Hedberg27f70f32014-07-21 10:50:06 +03002880 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02002881 if (!hdev)
2882 return NULL;
2883
David Herrmannb1b813d2012-04-22 14:39:58 +02002884 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2885 hdev->esco_type = (ESCO_HV1);
2886 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002887 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2888 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02002889 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002890 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2891 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002892
David Herrmannb1b813d2012-04-22 14:39:58 +02002893 hdev->sniff_max_interval = 800;
2894 hdev->sniff_min_interval = 80;
2895
Marcel Holtmann3f959d42014-02-20 11:55:56 -08002896 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02002897 hdev->le_adv_min_interval = 0x0800;
2898 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002899 hdev->le_scan_interval = 0x0060;
2900 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002901 hdev->le_conn_min_interval = 0x0028;
2902 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02002903 hdev->le_conn_latency = 0x0000;
2904 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01002905 hdev->le_def_tx_len = 0x001b;
2906 hdev->le_def_tx_time = 0x0148;
2907 hdev->le_max_tx_len = 0x001b;
2908 hdev->le_max_tx_time = 0x0148;
2909 hdev->le_max_rx_len = 0x001b;
2910 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002911
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002912 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01002913 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02002914 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2915 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002916
David Herrmannb1b813d2012-04-22 14:39:58 +02002917 mutex_init(&hdev->lock);
2918 mutex_init(&hdev->req_lock);
2919
2920 INIT_LIST_HEAD(&hdev->mgmt_pending);
2921 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03002922 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02002923 INIT_LIST_HEAD(&hdev->uuids);
2924 INIT_LIST_HEAD(&hdev->link_keys);
2925 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002926 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02002927 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002928 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03002929 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03002930 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03002931 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002932 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002933
2934 INIT_WORK(&hdev->rx_work, hci_rx_work);
2935 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2936 INIT_WORK(&hdev->tx_work, hci_tx_work);
2937 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002938
David Herrmannb1b813d2012-04-22 14:39:58 +02002939 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2940 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2941 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2942
David Herrmannb1b813d2012-04-22 14:39:58 +02002943 skb_queue_head_init(&hdev->rx_q);
2944 skb_queue_head_init(&hdev->cmd_q);
2945 skb_queue_head_init(&hdev->raw_q);
2946
2947 init_waitqueue_head(&hdev->req_wait_q);
2948
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002949 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02002950
David Herrmannb1b813d2012-04-22 14:39:58 +02002951 hci_init_sysfs(hdev);
2952 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002953
2954 return hdev;
2955}
2956EXPORT_SYMBOL(hci_alloc_dev);
2957
2958/* Free HCI device */
2959void hci_free_dev(struct hci_dev *hdev)
2960{
David Herrmann9be0dab2012-04-22 14:39:57 +02002961 /* will free via device release */
2962 put_device(&hdev->dev);
2963}
2964EXPORT_SYMBOL(hci_free_dev);
2965
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966/* Register HCI device */
2967int hci_register_dev(struct hci_dev *hdev)
2968{
David Herrmannb1b813d2012-04-22 14:39:58 +02002969 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970
Marcel Holtmann74292d52014-07-06 15:50:27 +02002971 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 return -EINVAL;
2973
Mat Martineau08add512011-11-02 16:18:36 -07002974 /* Do not allow HCI_AMP devices to register at index 0,
2975 * so the index can be used as the AMP controller ID.
2976 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002977 switch (hdev->dev_type) {
2978 case HCI_BREDR:
2979 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2980 break;
2981 case HCI_AMP:
2982 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2983 break;
2984 default:
2985 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002987
Sasha Levin3df92b32012-05-27 22:36:56 +02002988 if (id < 0)
2989 return id;
2990
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 sprintf(hdev->name, "hci%d", id);
2992 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002993
2994 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2995
Kees Cookd8537542013-07-03 15:04:57 -07002996 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2997 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002998 if (!hdev->workqueue) {
2999 error = -ENOMEM;
3000 goto err;
3001 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003002
Kees Cookd8537542013-07-03 15:04:57 -07003003 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3004 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003005 if (!hdev->req_workqueue) {
3006 destroy_workqueue(hdev->workqueue);
3007 error = -ENOMEM;
3008 goto err;
3009 }
3010
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003011 if (!IS_ERR_OR_NULL(bt_debugfs))
3012 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3013
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003014 dev_set_name(&hdev->dev, "%s", hdev->name);
3015
3016 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003017 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003018 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003020 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003021 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3022 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003023 if (hdev->rfkill) {
3024 if (rfkill_register(hdev->rfkill) < 0) {
3025 rfkill_destroy(hdev->rfkill);
3026 hdev->rfkill = NULL;
3027 }
3028 }
3029
Johan Hedberg5e130362013-09-13 08:58:17 +03003030 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3031 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3032
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003033 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003034 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003035
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003036 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003037 /* Assume BR/EDR support until proven otherwise (such as
3038 * through reading supported features during init.
3039 */
3040 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3041 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003042
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003043 write_lock(&hci_dev_list_lock);
3044 list_add(&hdev->list, &hci_dev_list);
3045 write_unlock(&hci_dev_list_lock);
3046
Marcel Holtmann4a964402014-07-02 19:10:33 +02003047 /* Devices that are marked for raw-only usage are unconfigured
3048 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003049 */
3050 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02003051 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003052
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003054 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055
Johan Hedberg19202572013-01-14 22:33:51 +02003056 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003057
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003059
David Herrmann33ca9542011-10-08 14:58:49 +02003060err_wqueue:
3061 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003062 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003063err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003064 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003065
David Herrmann33ca9542011-10-08 14:58:49 +02003066 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067}
3068EXPORT_SYMBOL(hci_register_dev);
3069
3070/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003071void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072{
Sasha Levin3df92b32012-05-27 22:36:56 +02003073 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003074
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003075 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076
Johan Hovold94324962012-03-15 14:48:41 +01003077 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3078
Sasha Levin3df92b32012-05-27 22:36:56 +02003079 id = hdev->id;
3080
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003081 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003083 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
3085 hci_dev_do_close(hdev);
3086
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303087 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003088 kfree_skb(hdev->reassembly[i]);
3089
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003090 cancel_work_sync(&hdev->power_on);
3091
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003092 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003093 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3094 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003095 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003096 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003097 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003098 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003099
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003100 /* mgmt_index_removed should take care of emptying the
3101 * pending list */
3102 BUG_ON(!list_empty(&hdev->mgmt_pending));
3103
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104 hci_notify(hdev, HCI_DEV_UNREG);
3105
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003106 if (hdev->rfkill) {
3107 rfkill_unregister(hdev->rfkill);
3108 rfkill_destroy(hdev->rfkill);
3109 }
3110
Johan Hedberg711eafe2014-08-08 09:32:52 +03003111 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02003112
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003113 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003114
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003115 debugfs_remove_recursive(hdev->debugfs);
3116
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003117 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003118 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003119
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003120 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003121 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003122 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003123 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003124 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003125 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003126 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003127 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003128 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003129 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003130 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003131 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003132
David Herrmanndc946bd2012-01-07 15:47:24 +01003133 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003134
3135 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136}
3137EXPORT_SYMBOL(hci_unregister_dev);
3138
3139/* Suspend HCI device */
3140int hci_suspend_dev(struct hci_dev *hdev)
3141{
3142 hci_notify(hdev, HCI_DEV_SUSPEND);
3143 return 0;
3144}
3145EXPORT_SYMBOL(hci_suspend_dev);
3146
3147/* Resume HCI device */
3148int hci_resume_dev(struct hci_dev *hdev)
3149{
3150 hci_notify(hdev, HCI_DEV_RESUME);
3151 return 0;
3152}
3153EXPORT_SYMBOL(hci_resume_dev);
3154
Marcel Holtmann75e05692014-11-02 08:15:38 +01003155/* Reset HCI device */
3156int hci_reset_dev(struct hci_dev *hdev)
3157{
3158 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3159 struct sk_buff *skb;
3160
3161 skb = bt_skb_alloc(3, GFP_ATOMIC);
3162 if (!skb)
3163 return -ENOMEM;
3164
3165 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3166 memcpy(skb_put(skb, 3), hw_err, 3);
3167
3168 /* Send Hardware Error to upper stack */
3169 return hci_recv_frame(hdev, skb);
3170}
3171EXPORT_SYMBOL(hci_reset_dev);
3172
Marcel Holtmann76bca882009-11-18 00:40:39 +01003173/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003174int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003175{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003176 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003177 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003178 kfree_skb(skb);
3179 return -ENXIO;
3180 }
3181
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003182 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003183 bt_cb(skb)->incoming = 1;
3184
3185 /* Time stamp */
3186 __net_timestamp(skb);
3187
Marcel Holtmann76bca882009-11-18 00:40:39 +01003188 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003189 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003190
Marcel Holtmann76bca882009-11-18 00:40:39 +01003191 return 0;
3192}
3193EXPORT_SYMBOL(hci_recv_frame);
3194
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303195static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003196 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303197{
3198 int len = 0;
3199 int hlen = 0;
3200 int remain = count;
3201 struct sk_buff *skb;
3202 struct bt_skb_cb *scb;
3203
3204 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003205 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303206 return -EILSEQ;
3207
3208 skb = hdev->reassembly[index];
3209
3210 if (!skb) {
3211 switch (type) {
3212 case HCI_ACLDATA_PKT:
3213 len = HCI_MAX_FRAME_SIZE;
3214 hlen = HCI_ACL_HDR_SIZE;
3215 break;
3216 case HCI_EVENT_PKT:
3217 len = HCI_MAX_EVENT_SIZE;
3218 hlen = HCI_EVENT_HDR_SIZE;
3219 break;
3220 case HCI_SCODATA_PKT:
3221 len = HCI_MAX_SCO_SIZE;
3222 hlen = HCI_SCO_HDR_SIZE;
3223 break;
3224 }
3225
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003226 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303227 if (!skb)
3228 return -ENOMEM;
3229
3230 scb = (void *) skb->cb;
3231 scb->expect = hlen;
3232 scb->pkt_type = type;
3233
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303234 hdev->reassembly[index] = skb;
3235 }
3236
3237 while (count) {
3238 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003239 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303240
3241 memcpy(skb_put(skb, len), data, len);
3242
3243 count -= len;
3244 data += len;
3245 scb->expect -= len;
3246 remain = count;
3247
3248 switch (type) {
3249 case HCI_EVENT_PKT:
3250 if (skb->len == HCI_EVENT_HDR_SIZE) {
3251 struct hci_event_hdr *h = hci_event_hdr(skb);
3252 scb->expect = h->plen;
3253
3254 if (skb_tailroom(skb) < scb->expect) {
3255 kfree_skb(skb);
3256 hdev->reassembly[index] = NULL;
3257 return -ENOMEM;
3258 }
3259 }
3260 break;
3261
3262 case HCI_ACLDATA_PKT:
3263 if (skb->len == HCI_ACL_HDR_SIZE) {
3264 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3265 scb->expect = __le16_to_cpu(h->dlen);
3266
3267 if (skb_tailroom(skb) < scb->expect) {
3268 kfree_skb(skb);
3269 hdev->reassembly[index] = NULL;
3270 return -ENOMEM;
3271 }
3272 }
3273 break;
3274
3275 case HCI_SCODATA_PKT:
3276 if (skb->len == HCI_SCO_HDR_SIZE) {
3277 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3278 scb->expect = h->dlen;
3279
3280 if (skb_tailroom(skb) < scb->expect) {
3281 kfree_skb(skb);
3282 hdev->reassembly[index] = NULL;
3283 return -ENOMEM;
3284 }
3285 }
3286 break;
3287 }
3288
3289 if (scb->expect == 0) {
3290 /* Complete frame */
3291
3292 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003293 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303294
3295 hdev->reassembly[index] = NULL;
3296 return remain;
3297 }
3298 }
3299
3300 return remain;
3301}
3302
Suraj Sumangala99811512010-07-14 13:02:19 +05303303#define STREAM_REASSEMBLY 0
3304
3305int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3306{
3307 int type;
3308 int rem = 0;
3309
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003310 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303311 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3312
3313 if (!skb) {
3314 struct { char type; } *pkt;
3315
3316 /* Start of the frame */
3317 pkt = data;
3318 type = pkt->type;
3319
3320 data++;
3321 count--;
3322 } else
3323 type = bt_cb(skb)->pkt_type;
3324
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003325 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003326 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303327 if (rem < 0)
3328 return rem;
3329
3330 data += (count - rem);
3331 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003332 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303333
3334 return rem;
3335}
3336EXPORT_SYMBOL(hci_recv_stream_fragment);
3337
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338/* ---- Interface to upper protocols ---- */
3339
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340int hci_register_cb(struct hci_cb *cb)
3341{
3342 BT_DBG("%p name %s", cb, cb->name);
3343
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003344 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003346 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347
3348 return 0;
3349}
3350EXPORT_SYMBOL(hci_register_cb);
3351
3352int hci_unregister_cb(struct hci_cb *cb)
3353{
3354 BT_DBG("%p name %s", cb, cb->name);
3355
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003356 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003358 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359
3360 return 0;
3361}
3362EXPORT_SYMBOL(hci_unregister_cb);
3363
Marcel Holtmann51086992013-10-10 14:54:19 -07003364static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003366 int err;
3367
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003368 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003370 /* Time stamp */
3371 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003373 /* Send copy to monitor */
3374 hci_send_to_monitor(hdev, skb);
3375
3376 if (atomic_read(&hdev->promisc)) {
3377 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003378 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 }
3380
3381 /* Get rid of skb owner, prior to sending to the driver. */
3382 skb_orphan(skb);
3383
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003384 err = hdev->send(hdev, skb);
3385 if (err < 0) {
3386 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3387 kfree_skb(skb);
3388 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389}
3390
Marcel Holtmann899de762014-07-11 05:51:58 +02003391bool hci_req_pending(struct hci_dev *hdev)
3392{
3393 return (hdev->req_status == HCI_REQ_PEND);
3394}
3395
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003396/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003397int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3398 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003399{
3400 struct sk_buff *skb;
3401
3402 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3403
3404 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3405 if (!skb) {
3406 BT_ERR("%s no memory for command", hdev->name);
3407 return -ENOMEM;
3408 }
3409
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003410 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003411 * single-command requests.
3412 */
3413 bt_cb(skb)->req.start = true;
3414
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003416 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417
3418 return 0;
3419}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420
3421/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003422void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423{
3424 struct hci_command_hdr *hdr;
3425
3426 if (!hdev->sent_cmd)
3427 return NULL;
3428
3429 hdr = (void *) hdev->sent_cmd->data;
3430
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003431 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 return NULL;
3433
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003434 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435
3436 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3437}
3438
3439/* Send ACL data */
3440static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3441{
3442 struct hci_acl_hdr *hdr;
3443 int len = skb->len;
3444
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003445 skb_push(skb, HCI_ACL_HDR_SIZE);
3446 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003447 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003448 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3449 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450}
3451
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003452static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003453 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003455 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456 struct hci_dev *hdev = conn->hdev;
3457 struct sk_buff *list;
3458
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003459 skb->len = skb_headlen(skb);
3460 skb->data_len = 0;
3461
3462 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003463
3464 switch (hdev->dev_type) {
3465 case HCI_BREDR:
3466 hci_add_acl_hdr(skb, conn->handle, flags);
3467 break;
3468 case HCI_AMP:
3469 hci_add_acl_hdr(skb, chan->handle, flags);
3470 break;
3471 default:
3472 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3473 return;
3474 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003475
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003476 list = skb_shinfo(skb)->frag_list;
3477 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 /* Non fragmented */
3479 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3480
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003481 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482 } else {
3483 /* Fragmented */
3484 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3485
3486 skb_shinfo(skb)->frag_list = NULL;
3487
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003488 /* Queue all fragments atomically. We need to use spin_lock_bh
3489 * here because of 6LoWPAN links, as there this function is
3490 * called from softirq and using normal spin lock could cause
3491 * deadlocks.
3492 */
3493 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003495 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003496
3497 flags &= ~ACL_START;
3498 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 do {
3500 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003501
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003502 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003503 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504
3505 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3506
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003507 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 } while (list);
3509
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003510 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003512}
3513
3514void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3515{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003516 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003517
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003518 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003519
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003520 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003522 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524
3525/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003526void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527{
3528 struct hci_dev *hdev = conn->hdev;
3529 struct hci_sco_hdr hdr;
3530
3531 BT_DBG("%s len %d", hdev->name, skb->len);
3532
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003533 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 hdr.dlen = skb->len;
3535
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003536 skb_push(skb, HCI_SCO_HDR_SIZE);
3537 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003538 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003540 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003541
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003543 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545
3546/* ---- HCI TX task (outgoing data) ---- */
3547
3548/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003549static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3550 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551{
3552 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003553 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003554 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003556 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003558
3559 rcu_read_lock();
3560
3561 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003562 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003564
3565 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3566 continue;
3567
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 num++;
3569
3570 if (c->sent < min) {
3571 min = c->sent;
3572 conn = c;
3573 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003574
3575 if (hci_conn_num(hdev, type) == num)
3576 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 }
3578
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003579 rcu_read_unlock();
3580
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003582 int cnt, q;
3583
3584 switch (conn->type) {
3585 case ACL_LINK:
3586 cnt = hdev->acl_cnt;
3587 break;
3588 case SCO_LINK:
3589 case ESCO_LINK:
3590 cnt = hdev->sco_cnt;
3591 break;
3592 case LE_LINK:
3593 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3594 break;
3595 default:
3596 cnt = 0;
3597 BT_ERR("Unknown link type");
3598 }
3599
3600 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 *quote = q ? q : 1;
3602 } else
3603 *quote = 0;
3604
3605 BT_DBG("conn %p quote %d", conn, *quote);
3606 return conn;
3607}
3608
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003609static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610{
3611 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003612 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Ville Tervobae1f5d92011-02-10 22:38:53 -03003614 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003616 rcu_read_lock();
3617
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003619 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003620 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003621 BT_ERR("%s killing stalled connection %pMR",
3622 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003623 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 }
3625 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003626
3627 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628}
3629
/* HCI channel scheduler.
 *
 * Select the channel that should transmit next on a link of the given
 * type.  Among all channels whose head-of-queue skb carries the highest
 * priority currently pending, the one on the connection with the fewest
 * unacknowledged packets wins.  On success *quote is set to that
 * channel's fair share of the controller's free buffers (at least one).
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority is considered;
			 * channels below the best priority seen so far
			 * are skipped entirely. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Strictly higher priority found: restart the
			 * fairness bookkeeping at this new level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels, prefer the
			 * connection with the fewest packets in flight. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Every connection of this type visited: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the chosen link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of free buffers, but always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3711
/* Anti-starvation pass, run after a scheduling round that sent data.
 *
 * Any channel of the given link type that did not transmit in the
 * last round (chan->sent == 0) gets the priority of its head skb
 * bumped to HCI_PRIO_MAX - 1 so higher-priority traffic cannot starve
 * it forever; channels that did send simply have their per-round
 * counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset the
			 * counter, no promotion needed. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion level */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Every connection of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3761
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003762static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3763{
3764 /* Calculate count of blocks used by this packet */
3765 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3766}
3767
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003768static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769{
Marcel Holtmann4a964402014-07-02 19:10:33 +02003770 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771 /* ACL tx timeout must be longer than maximum
3772 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003773 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003774 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003775 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003777}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778
/* Packet-based ACL scheduling.
 *
 * While the controller has free ACL buffers (hdev->acl_cnt), serve the
 * channel selected by hci_chan_sent().  A channel's quote is consumed
 * only for skbs that keep the priority its head skb had when the
 * channel was picked; once the head priority drops, selection is
 * re-run.  If anything was sent, starved channels are promoted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Watchdog: kill stalled links if buffers never came back */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent this round: boost starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3816
/* Block-based ACL scheduling (data block flow control).
 *
 * Same structure as hci_sched_acl_pkt(), but buffer accounting is in
 * controller data blocks rather than whole packets.  On an AMP
 * controller the traffic is carried over AMP_LINK connections.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet exceeds the remaining block budget:
			 * bail out of the whole pass.
			 * NOTE(review): skb was already dequeued and is
			 * neither sent nor freed here — looks like a
			 * drop/leak; confirm against upstream history. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3870
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003871static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003872{
3873 BT_DBG("%s", hdev->name);
3874
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003875 /* No ACL link over BR/EDR controller */
3876 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3877 return;
3878
3879 /* No AMP link over AMP controller */
3880 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003881 return;
3882
3883 switch (hdev->flow_ctl_mode) {
3884 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3885 hci_sched_acl_pkt(hdev);
3886 break;
3887
3888 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3889 hci_sched_acl_blk(hdev);
3890 break;
3891 }
3892}
3893
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003895static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896{
3897 struct hci_conn *conn;
3898 struct sk_buff *skb;
3899 int quote;
3900
3901 BT_DBG("%s", hdev->name);
3902
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003903 if (!hci_conn_num(hdev, SCO_LINK))
3904 return;
3905
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3907 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3908 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003909 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910
3911 conn->sent++;
3912 if (conn->sent == ~0)
3913 conn->sent = 0;
3914 }
3915 }
3916}
3917
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003918static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003919{
3920 struct hci_conn *conn;
3921 struct sk_buff *skb;
3922 int quote;
3923
3924 BT_DBG("%s", hdev->name);
3925
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003926 if (!hci_conn_num(hdev, ESCO_LINK))
3927 return;
3928
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003929 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3930 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003931 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3932 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003933 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003934
3935 conn->sent++;
3936 if (conn->sent == ~0)
3937 conn->sent = 0;
3938 }
3939 }
3940}
3941
/* Schedule LE data.
 *
 * LE traffic either has a dedicated buffer pool (hdev->le_pkts /
 * le_cnt) or, on controllers without LE buffers, shares the ACL pool.
 * The remaining budget is written back to whichever pool was used, and
 * starved LE channels are promoted if anything was sent.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE pool if present, otherwise share ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it was drawn from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3992
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003993static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003995 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996 struct sk_buff *skb;
3997
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003998 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003999 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000
Marcel Holtmann52de5992013-09-03 18:08:38 -07004001 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4002 /* Schedule queues and send stuff to HCI driver */
4003 hci_sched_acl(hdev);
4004 hci_sched_sco(hdev);
4005 hci_sched_esco(hdev);
4006 hci_sched_le(hdev);
4007 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004008
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 /* Send next queued raw (unknown type) packet */
4010 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004011 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012}
4013
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004014/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015
4016/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004017static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018{
4019 struct hci_acl_hdr *hdr = (void *) skb->data;
4020 struct hci_conn *conn;
4021 __u16 handle, flags;
4022
4023 skb_pull(skb, HCI_ACL_HDR_SIZE);
4024
4025 handle = __le16_to_cpu(hdr->handle);
4026 flags = hci_flags(handle);
4027 handle = hci_handle(handle);
4028
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004029 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004030 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031
4032 hdev->stat.acl_rx++;
4033
4034 hci_dev_lock(hdev);
4035 conn = hci_conn_hash_lookup_handle(hdev, handle);
4036 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004037
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004039 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004040
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004042 l2cap_recv_acldata(conn, skb, flags);
4043 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004045 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004046 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 }
4048
4049 kfree_skb(skb);
4050}
4051
4052/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004053static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054{
4055 struct hci_sco_hdr *hdr = (void *) skb->data;
4056 struct hci_conn *conn;
4057 __u16 handle;
4058
4059 skb_pull(skb, HCI_SCO_HDR_SIZE);
4060
4061 handle = __le16_to_cpu(hdr->handle);
4062
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004063 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064
4065 hdev->stat.sco_rx++;
4066
4067 hci_dev_lock(hdev);
4068 conn = hci_conn_hash_lookup_handle(hdev, handle);
4069 hci_dev_unlock(hdev);
4070
4071 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004073 sco_recv_scodata(conn, skb);
4074 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004076 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004077 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 }
4079
4080 kfree_skb(skb);
4081}
4082
Johan Hedberg9238f362013-03-05 20:37:48 +02004083static bool hci_req_is_complete(struct hci_dev *hdev)
4084{
4085 struct sk_buff *skb;
4086
4087 skb = skb_peek(&hdev->cmd_q);
4088 if (!skb)
4089 return true;
4090
4091 return bt_cb(skb)->req.start;
4092}
4093
Johan Hedberg42c6b122013-03-05 20:37:49 +02004094static void hci_resend_last(struct hci_dev *hdev)
4095{
4096 struct hci_command_hdr *sent;
4097 struct sk_buff *skb;
4098 u16 opcode;
4099
4100 if (!hdev->sent_cmd)
4101 return;
4102
4103 sent = (void *) hdev->sent_cmd->data;
4104 opcode = __le16_to_cpu(sent->opcode);
4105 if (opcode == HCI_OP_RESET)
4106 return;
4107
4108 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4109 if (!skb)
4110 return;
4111
4112 skb_queue_head(&hdev->cmd_q, skb);
4113 queue_work(hdev->workqueue, &hdev->cmd_work);
4114}
4115
/* Handle completion of an HCI command that may be part of a request.
 *
 * Resolves the request's completion callback when the last command of
 * a request finishes (or any command fails with non-zero status), and
 * flushes the remaining queued commands of an aborted request.  The
 * callback, if any, is invoked exactly once with the given status.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and
		 * put it back on the queue. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4181
/* RX work item: deliver every queued incoming packet.
 *
 * Each skb is first copied to the monitor socket (and, in promiscuous
 * mode, to the HCI sockets), then dispatched by packet type.  While a
 * user channel owns the device, and for data packets during HCI_INIT,
 * packets are dropped instead of being processed.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* User channel has exclusive access: drop the packet */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
4236
/* CMD work item: transmit the next queued HCI command when the
 * controller has a free command slot (cmd_cnt).
 *
 * A clone of the outgoing command is kept as hdev->sent_cmd so the
 * command-complete handler can match the response; the command timer
 * is armed unless a reset is in flight.  If cloning fails the command
 * is put back and the work item re-queued.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Previous sent_cmd is no longer needed */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}