/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

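/* Completion callback shared by all synchronous requests: it records
 * the controller's status and wakes up the thread sleeping on
 * hdev->req_wait_q in __hci_req_sync() or __hci_cmd_sync_ev().
 */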
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

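/* Pull the last event received from the controller (hdev->recv_evt)
 * and check that it is either the requested event or, when @event is
 * zero, a Command Complete for @opcode. On any mismatch the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 */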
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

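/* Send a single HCI command and sleep until the controller answers
 * with @event (or with a Command Complete when @event is zero).
 * Callers serialize against other requests via hci_req_lock().
 */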
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

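/* Second-stage commands that only make sense on BR/EDR capable
 * controllers: read buffer sizes, device class, local name and voice
 * setting, query IAC support, clear the event filter and set the
 * connection accept timeout.
 */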
static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

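/* Build the page 1 event mask from the supported LMP features so that
 * the controller only reports events the host side can handle.
 */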
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

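/* Advertise every link policy the controller supports (role switch,
 * hold, sniff and park) as the default policy for new connections.
 */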
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

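/* Third-stage commands that depend on the feature and command bitmasks
 * read during the earlier stages, including the LE event mask and the
 * extended feature pages.
 */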
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10; /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40; /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04; /* LE Direct Advertising
                                            * Report
                                            */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80; /* LE Read Local P-256
                                            * Public Key Complete
                                            */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01; /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

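/* Staged bring-up of a controller: stage 1 applies to every controller
 * type, while stages 2 to 4 only run for BR/EDR/LE controllers. The
 * debugfs entries and the SMP channel are registered on the first
 * bring-up only, i.e. while in setup or config phase.
 */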
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev)) {
                hci_debugfs_create_le(hdev);
                smp_register(hdev);
        }

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

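/* Move the discovery state machine to @state and let the management
 * interface know when discovery effectively starts or stops.
 */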
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

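/* Insert or refresh the inquiry cache entry for a discovered device
 * and return the MGMT_DEV_FOUND_* flags that apply to it.
 */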
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

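/* Back end of the HCIINQUIRY ioctl: optionally kick off a new inquiry
 * and then copy the cached results back to user space.
 */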
1268int hci_inquiry(void __user *arg)
1269{
1270 __u8 __user *ptr = arg;
1271 struct hci_inquiry_req ir;
1272 struct hci_dev *hdev;
1273 int err = 0, do_inquiry = 0, max_rsp;
1274 long timeo;
1275 __u8 *buf;
1276
1277 if (copy_from_user(&ir, ptr, sizeof(ir)))
1278 return -EFAULT;
1279
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001280 hdev = hci_dev_get(ir.dev_id);
1281 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 return -ENODEV;
1283
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001284 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1285 err = -EBUSY;
1286 goto done;
1287 }
1288
Marcel Holtmann4a964402014-07-02 19:10:33 +02001289 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001290 err = -EOPNOTSUPP;
1291 goto done;
1292 }
1293
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001294 if (hdev->dev_type != HCI_BREDR) {
1295 err = -EOPNOTSUPP;
1296 goto done;
1297 }
1298
Johan Hedberg56f87902013-10-02 13:43:13 +03001299 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1300 err = -EOPNOTSUPP;
1301 goto done;
1302 }
1303
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001304 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001305 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001306 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001307 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 do_inquiry = 1;
1309 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001310 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
Marcel Holtmann04837f62006-07-03 10:02:33 +02001312 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001313
1314 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001315 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1316 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001317 if (err < 0)
1318 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001319
1320 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1321 * cleared). If it is interrupted by a signal, return -EINTR.
1322 */
NeilBrown74316202014-07-07 15:16:04 +10001323 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001324 TASK_INTERRUPTIBLE))
1325 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001326 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001328 /* for unlimited number of responses we will use buffer with
1329 * 255 entries
1330 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1332
1333 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1334 * copy it to the user space.
1335 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001336 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001337 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 err = -ENOMEM;
1339 goto done;
1340 }
1341
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001342 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001344 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
1346 BT_DBG("num_rsp %d", ir.num_rsp);
1347
1348 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1349 ptr += sizeof(ir);
1350 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001351 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001353 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 err = -EFAULT;
1355
1356 kfree(buf);
1357
1358done:
1359 hci_dev_put(hdev);
1360 return err;
1361}
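
/* Usage note (illustrative sketch, not part of the original file):
 * hci_inquiry() above backs the HCIINQUIRY ioctl on a raw HCI socket.
 * The caller passes a buffer that starts with struct hci_inquiry_req
 * and leaves room for the returned inquiry_info entries. A minimal
 * userspace example, assuming the usual BlueZ headers and omitting
 * error handling:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int example_inquiry(void)
 *	{
 *		struct {
 *			struct hci_inquiry_req ir;
 *			struct inquiry_info info[8];
 *		} buf = { .ir = {
 *			.dev_id  = 0,				// hci0
 *			.flags   = IREQ_CACHE_FLUSH,		// drop stale cache
 *			.lap     = { 0x33, 0x8b, 0x9e },	// GIAC
 *			.length  = 4,				// 4 * 1.28s
 *			.num_rsp = 8,
 *		} };
 *		int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *		return ioctl(dd, HCIINQUIRY, &buf);	// ir.num_rsp holds count
 *	}
 */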

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If a public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
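
/* Summary of the power-on flow above (descriptive note, not upstream
 * text): the driver's open() runs first, setup() runs exactly once
 * while HCI_SETUP is set, __hci_unconf_init() or __hci_init() then
 * performs the HCI init command sequence, and only if all of that
 * succeeded are HCI_UP set and mgmt_powered() signalled. On any
 * failure the transport is closed again and all queues are purged.
 */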

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the
	 * HCI_USER_CHANNEL flag will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers that are not using the management interface
	 * and are brought up through the legacy ioctl, set the
	 * HCI_BONDABLE bit so that pairing works for them. Once the
	 * management interface is in use, this bit will be cleared
	 * again and userspace has to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
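
/* Usage note (illustrative sketch): hci_dev_open() services the
 * HCIDEVUP ioctl, which is how legacy tools such as "hciconfig hci0 up"
 * power a controller without mgmt. A rough userspace example on a raw
 * HCI control socket:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");	// e.g. ERFKILL or EADDRNOTAVAIL
 */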

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
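
/* Usage note (illustrative sketch): the HCISETSCAN case above is the
 * legacy "hciconfig hci0 piscan" path. dev_opt carries the Write Scan
 * Enable value, and on success hci_update_scan_state() mirrors it into
 * the HCI_CONNECTABLE/HCI_DISCOVERABLE flags so mgmt stays consistent.
 * A hedged userspace example:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	// connectable + discoverable
 *	};
 *
 *	ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
 */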

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When auto-off is configured, the transport is
		 * running, but still indicate that the device is
		 * actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
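
/* Usage note (illustrative sketch): userspace drives hci_get_dev_list()
 * via HCIGETDEVLIST with a buffer whose leading __u16 gives the number
 * of available slots. A minimal example, assuming at most 16
 * controllers:
 *
 *	struct {
 *		struct hci_dev_list_req dl;
 *		struct hci_dev_req dr[16];
 *	} req = { .dl = { .dev_num = 16 } };
 *	int i;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, &req) == 0)
 *		for (i = 0; i < req.dl.dev_num; i++)
 *			printf("hci%u up=%d\n", req.dr[i].dev_id,
 *			       !!(req.dr[i].dev_opt & (1 << HCI_UP)));
 */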

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When auto-off is configured, the transport is running, but
	 * still indicate that the device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Now that the controller is configured, it is
		 * important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
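
/* Worked example for the decision ladder above (descriptive note, using
 * the HCI_LK_* values from hci.h): an unauthenticated combination key
 * (0x04) created while the local side requested dedicated bonding
 * (auth_type 0x02) is stored persistently, a debug combination key
 * (0x03) never is, and a changed combination key (0x06) survives only
 * when a previous key existed, i.e. old_key_type != 0xff.
 */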

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
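
/* Note (descriptive): resetting cmd_cnt to 1 after the timeout unblocks
 * hci_cmd_work(), so the next queued command can still be sent even
 * though the controller never completed the timed-out one.
 */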
2478
Szymon Janc2763eda2011-03-22 13:12:22 +01002479struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002480 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002481{
2482 struct oob_data *data;
2483
Johan Hedberg6928a922014-10-26 20:46:09 +01002484 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2485 if (bacmp(bdaddr, &data->bdaddr) != 0)
2486 continue;
2487 if (data->bdaddr_type != bdaddr_type)
2488 continue;
2489 return data;
2490 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002491
2492 return NULL;
2493}
2494
Johan Hedberg6928a922014-10-26 20:46:09 +01002495int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2496 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002497{
2498 struct oob_data *data;
2499
Johan Hedberg6928a922014-10-26 20:46:09 +01002500 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002501 if (!data)
2502 return -ENOENT;
2503
Johan Hedberg6928a922014-10-26 20:46:09 +01002504 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002505
2506 list_del(&data->list);
2507 kfree(data);
2508
2509 return 0;
2510}
2511
Johan Hedberg35f74982014-02-18 17:14:32 +02002512void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002513{
2514 struct oob_data *data, *n;
2515
2516 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2517 list_del(&data->list);
2518 kfree(data);
2519 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002520}
2521
Marcel Holtmann07988722014-01-10 02:07:29 -08002522int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002523 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002524 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002525{
2526 struct oob_data *data;
2527
Johan Hedberg6928a922014-10-26 20:46:09 +01002528 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002529 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002530 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002531 if (!data)
2532 return -ENOMEM;
2533
2534 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002535 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002536 list_add(&data->list, &hdev->remote_oob_data);
2537 }
2538
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002539 if (hash192 && rand192) {
2540 memcpy(data->hash192, hash192, sizeof(data->hash192));
2541 memcpy(data->rand192, rand192, sizeof(data->rand192));
2542 } else {
2543 memset(data->hash192, 0, sizeof(data->hash192));
2544 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08002545 }
2546
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002547 if (hash256 && rand256) {
2548 memcpy(data->hash256, hash256, sizeof(data->hash256));
2549 memcpy(data->rand256, rand256, sizeof(data->rand256));
2550 } else {
2551 memset(data->hash256, 0, sizeof(data->hash256));
2552 memset(data->rand256, 0, sizeof(data->rand256));
2553 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002554
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002555 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002556
2557 return 0;
2558}
2559
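/*
 * Minimal usage sketch, not taken from the original file: store
 * P-192-only pairing data received over an out-of-band channel and
 * read it back.  The zeroed arrays are placeholders for the real
 * hash C and randomizer R values, and "peer" is an assumed parameter;
 * hdev->lock is taken since the helpers touch hdev->remote_oob_data.
 */
static int example_store_oob(struct hci_dev *hdev, bdaddr_t *peer)
{
	u8 hash192[16] = { 0 };	/* placeholder hash C value */
	u8 rand192[16] = { 0 };	/* placeholder randomizer R value */
	int err;

	hci_dev_lock(hdev);

	/* No P-256 values available, so hash256/rand256 are NULL and
	 * end up zeroed in the stored entry.
	 */
	err = hci_add_remote_oob_data(hdev, peer, BDADDR_BREDR,
				      hash192, rand192, NULL, NULL);
	if (!err && !hci_find_remote_oob_data(hdev, peer, BDADDR_BREDR))
		err = -ENOENT;

	hci_dev_unlock(hdev);

	return err;
}
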
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002560struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002561 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002562{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002563 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002564
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002565 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002566 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002567 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002568 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002569
2570 return NULL;
2571}
2572
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002573void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002574{
2575 struct list_head *p, *n;
2576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002577 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002578 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002579
2580 list_del(p);
2581 kfree(b);
2582 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002583}
2584
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002585int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002586{
2587 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002588
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002589 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002590 return -EBADF;
2591
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002592 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002593 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002594
Johan Hedberg27f70f32014-07-21 10:50:06 +03002595 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002596 if (!entry)
2597 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002598
2599 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002600 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002601
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002602 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002603
2604 return 0;
2605}
2606
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002607int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002608{
2609 struct bdaddr_list *entry;
2610
Johan Hedberg35f74982014-02-18 17:14:32 +02002611 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002612 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002613 return 0;
2614 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002615
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002616 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002617 if (!entry)
2618 return -ENOENT;
2619
2620 list_del(&entry->list);
2621 kfree(entry);
2622
2623 return 0;
2624}
2625
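/*
 * Usage sketch (illustrative only): the helpers above back
 * hdev->blacklist, hdev->whitelist and hdev->le_white_list alike; only
 * the list head passed in differs.  BDADDR_BREDR is an example
 * address type.
 */
static int example_accept_peer(struct hci_dev *hdev, bdaddr_t *peer)
{
	int err;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->whitelist, peer, BDADDR_BREDR);
	if (err == -EEXIST)
		err = 0;	/* already accepted; treat as success */

	hci_dev_unlock(hdev);

	return err;
}
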
Andre Guedes15819a72014-02-03 13:56:18 -03002626/* This function requires that the caller hold hdev->lock */
2627struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2628 bdaddr_t *addr, u8 addr_type)
2629{
2630 struct hci_conn_params *params;
2631
Johan Hedberg738f6182014-07-03 19:33:51 +03002632 /* The conn params list only contains identity addresses */
2633 if (!hci_is_identity_address(addr, addr_type))
2634 return NULL;
2635
Andre Guedes15819a72014-02-03 13:56:18 -03002636 list_for_each_entry(params, &hdev->le_conn_params, list) {
2637 if (bacmp(&params->addr, addr) == 0 &&
2638 params->addr_type == addr_type) {
2639 return params;
2640 }
2641 }
2642
2643 return NULL;
2644}
2645
2646/* This function requires that the caller hold hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002647struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2648 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002649{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002650 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002651
Johan Hedberg738f6182014-07-03 19:33:51 +03002652 /* The list only contains identity addresses */
2653 if (!hci_is_identity_address(addr, addr_type))
2654 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002655
Johan Hedberg501f8822014-07-04 12:37:26 +03002656 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002657 if (bacmp(&param->addr, addr) == 0 &&
2658 param->addr_type == addr_type)
2659 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002660 }
2661
2662 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002663}
2664
2665/* This function requires that the caller hold hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002666struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2667 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002668{
2669 struct hci_conn_params *params;
2670
Johan Hedbergc46245b2014-07-02 17:37:33 +03002671 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002672 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03002673
Andre Guedes15819a72014-02-03 13:56:18 -03002674 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002675 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002676 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002677
2678 params = kzalloc(sizeof(*params), GFP_KERNEL);
2679 if (!params) {
2680 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002681 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002682 }
2683
2684 bacpy(&params->addr, addr);
2685 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002686
2687 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002688 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002689
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002690 params->conn_min_interval = hdev->le_conn_min_interval;
2691 params->conn_max_interval = hdev->le_conn_max_interval;
2692 params->conn_latency = hdev->le_conn_latency;
2693 params->supervision_timeout = hdev->le_supv_timeout;
2694 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2695
2696 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2697
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002698 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002699}
2700
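/*
 * Illustrative sketch: create (or look up) connection parameters for
 * an LE peer and tighten its connection interval.  The values are
 * examples in 1.25 ms units, not recommendations, and "peer" is an
 * assumed identity address.
 */
static int example_tune_le_conn(struct hci_dev *hdev, bdaddr_t *peer)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	params = hci_conn_params_add(hdev, peer, ADDR_LE_DEV_PUBLIC);
	if (params) {
		params->conn_min_interval = 0x0018;	/* 30 ms */
		params->conn_max_interval = 0x0028;	/* 50 ms */
	}

	hci_dev_unlock(hdev);

	return params ? 0 : -ENOMEM;
}
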
Johan Hedbergf6c63242014-08-15 21:06:59 +03002701static void hci_conn_params_free(struct hci_conn_params *params)
2702{
2703 if (params->conn) {
2704 hci_conn_drop(params->conn);
2705 hci_conn_put(params->conn);
2706 }
2707
2708 list_del(&params->action);
2709 list_del(&params->list);
2710 kfree(params);
2711}
2712
Andre Guedes15819a72014-02-03 13:56:18 -03002713/* This function requires that the caller hold hdev->lock */
2714void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2715{
2716 struct hci_conn_params *params;
2717
2718 params = hci_conn_params_lookup(hdev, addr, addr_type);
2719 if (!params)
2720 return;
2721
Johan Hedbergf6c63242014-08-15 21:06:59 +03002722 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002723
Johan Hedberg95305ba2014-07-04 12:37:21 +03002724 hci_update_background_scan(hdev);
2725
Andre Guedes15819a72014-02-03 13:56:18 -03002726 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2727}
2728
2729/* This function requires that the caller hold hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03002730void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002731{
2732 struct hci_conn_params *params, *tmp;
2733
2734 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03002735 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2736 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03002737 list_del(&params->list);
2738 kfree(params);
2739 }
2740
Johan Hedberg55af49a82014-07-02 17:37:26 +03002741	BT_DBG("All disabled LE connection parameters were removed");
2742}
2743
2744/* This function requires that the caller hold hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03002745void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002746{
2747 struct hci_conn_params *params, *tmp;
2748
Johan Hedbergf6c63242014-08-15 21:06:59 +03002749 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2750 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002751
Johan Hedberga2f41a82014-07-04 12:37:19 +03002752 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02002753
Andre Guedes15819a72014-02-03 13:56:18 -03002754 BT_DBG("All LE connection parameters were removed");
2755}
2756
Marcel Holtmann1904a852015-01-11 13:50:44 -08002757static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002758{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002759 if (status) {
2760 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002761
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002762 hci_dev_lock(hdev);
2763 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2764 hci_dev_unlock(hdev);
2765 return;
2766 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002767}
2768
Marcel Holtmann1904a852015-01-11 13:50:44 -08002769static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2770 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002771{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002772 /* General inquiry access code (GIAC) */
2773 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2774 struct hci_request req;
2775 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002776 int err;
2777
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002778 if (status) {
2779 BT_ERR("Failed to disable LE scanning: status %d", status);
2780 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002781 }
2782
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002783 switch (hdev->discovery.type) {
2784 case DISCOV_TYPE_LE:
2785 hci_dev_lock(hdev);
2786 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2787 hci_dev_unlock(hdev);
2788 break;
2789
2790 case DISCOV_TYPE_INTERLEAVED:
2791 hci_req_init(&req, hdev);
2792
2793 memset(&cp, 0, sizeof(cp));
2794 memcpy(&cp.lap, lap, sizeof(cp.lap));
2795 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2796 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2797
2798 hci_dev_lock(hdev);
2799
2800 hci_inquiry_cache_flush(hdev);
2801
2802 err = hci_req_run(&req, inquiry_complete);
2803 if (err) {
2804 BT_ERR("Inquiry request failed: err %d", err);
2805 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2806 }
2807
2808 hci_dev_unlock(hdev);
2809 break;
2810 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002811}
2812
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002813static void le_scan_disable_work(struct work_struct *work)
2814{
2815 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002816 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002817 struct hci_request req;
2818 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002819
2820 BT_DBG("%s", hdev->name);
2821
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002822 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002823
Andre Guedesb1efcc22014-02-26 20:21:40 -03002824 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002825
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002826 err = hci_req_run(&req, le_scan_disable_work_complete);
2827 if (err)
2828 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002829}
2830
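/*
 * Sketch of the request pattern used above, under the assumption that
 * the hci_request helpers keep the signatures seen in this file:
 * batch commands with hci_req_add*() and run them with one completion
 * callback.  Here a single LE scan disable is queued, mirroring
 * le_scan_disable_work().
 */
static void example_run_scan_disable(struct hci_dev *hdev,
				     hci_req_complete_t complete)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
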
Johan Hedberga1f4c312014-02-27 14:05:41 +02002831/* Copy the Identity Address of the controller.
2832 *
2833 * If the controller has a public BD_ADDR, then by default use that one.
2834 * If this is an LE-only controller without a public address, default to
2835 * the static random address.
2836 *
2837 * For debugging purposes it is possible to force controllers with a
2838 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002839 *
2840 * In case BR/EDR has been disabled on a dual-mode controller and
2841 * userspace has configured a static address, then that address
2842 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002843 */
2844void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2845 u8 *bdaddr_type)
2846{
Marcel Holtmann111902f2014-06-21 04:53:17 +02002847 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002848 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2849 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2850 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002851 bacpy(bdaddr, &hdev->static_addr);
2852 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2853 } else {
2854 bacpy(bdaddr, &hdev->bdaddr);
2855 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2856 }
2857}
2858
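/*
 * Usage sketch: ask the core which address is currently acting as the
 * controller's identity, following the selection rules documented
 * above.  Purely illustrative.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);

	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}
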
David Herrmann9be0dab2012-04-22 14:39:57 +02002859/* Alloc HCI device */
2860struct hci_dev *hci_alloc_dev(void)
2861{
2862 struct hci_dev *hdev;
2863
Johan Hedberg27f70f32014-07-21 10:50:06 +03002864 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02002865 if (!hdev)
2866 return NULL;
2867
David Herrmannb1b813d2012-04-22 14:39:58 +02002868 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2869 hdev->esco_type = (ESCO_HV1);
2870 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002871 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2872 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02002873 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002874 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2875 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002876
David Herrmannb1b813d2012-04-22 14:39:58 +02002877 hdev->sniff_max_interval = 800;
2878 hdev->sniff_min_interval = 80;
2879
Marcel Holtmann3f959d42014-02-20 11:55:56 -08002880 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02002881 hdev->le_adv_min_interval = 0x0800;
2882 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002883 hdev->le_scan_interval = 0x0060;
2884 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002885 hdev->le_conn_min_interval = 0x0028;
2886 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02002887 hdev->le_conn_latency = 0x0000;
2888 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01002889 hdev->le_def_tx_len = 0x001b;
2890 hdev->le_def_tx_time = 0x0148;
2891 hdev->le_max_tx_len = 0x001b;
2892 hdev->le_max_tx_time = 0x0148;
2893 hdev->le_max_rx_len = 0x001b;
2894 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002895
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002896 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01002897 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02002898 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2899 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002900
David Herrmannb1b813d2012-04-22 14:39:58 +02002901 mutex_init(&hdev->lock);
2902 mutex_init(&hdev->req_lock);
2903
2904 INIT_LIST_HEAD(&hdev->mgmt_pending);
2905 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03002906 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02002907 INIT_LIST_HEAD(&hdev->uuids);
2908 INIT_LIST_HEAD(&hdev->link_keys);
2909 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002910 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02002911 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002912 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03002913 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03002914 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03002915 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002916 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002917
2918 INIT_WORK(&hdev->rx_work, hci_rx_work);
2919 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2920 INIT_WORK(&hdev->tx_work, hci_tx_work);
2921 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002922
David Herrmannb1b813d2012-04-22 14:39:58 +02002923 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2924 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2925 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2926
David Herrmannb1b813d2012-04-22 14:39:58 +02002927 skb_queue_head_init(&hdev->rx_q);
2928 skb_queue_head_init(&hdev->cmd_q);
2929 skb_queue_head_init(&hdev->raw_q);
2930
2931 init_waitqueue_head(&hdev->req_wait_q);
2932
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002933 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02002934
David Herrmannb1b813d2012-04-22 14:39:58 +02002935 hci_init_sysfs(hdev);
2936 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002937
2938 return hdev;
2939}
2940EXPORT_SYMBOL(hci_alloc_dev);
2941
2942/* Free HCI device */
2943void hci_free_dev(struct hci_dev *hdev)
2944{
David Herrmann9be0dab2012-04-22 14:39:57 +02002945 /* will free via device release */
2946 put_device(&hdev->dev);
2947}
2948EXPORT_SYMBOL(hci_free_dev);
2949
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950/* Register HCI device */
2951int hci_register_dev(struct hci_dev *hdev)
2952{
David Herrmannb1b813d2012-04-22 14:39:58 +02002953 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
Marcel Holtmann74292d52014-07-06 15:50:27 +02002955 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 return -EINVAL;
2957
Mat Martineau08add512011-11-02 16:18:36 -07002958 /* Do not allow HCI_AMP devices to register at index 0,
2959 * so the index can be used as the AMP controller ID.
2960 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002961 switch (hdev->dev_type) {
2962 case HCI_BREDR:
2963 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2964 break;
2965 case HCI_AMP:
2966 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2967 break;
2968 default:
2969 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002971
Sasha Levin3df92b32012-05-27 22:36:56 +02002972 if (id < 0)
2973 return id;
2974
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 sprintf(hdev->name, "hci%d", id);
2976 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002977
2978 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2979
Kees Cookd8537542013-07-03 15:04:57 -07002980 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2981 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002982 if (!hdev->workqueue) {
2983 error = -ENOMEM;
2984 goto err;
2985 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002986
Kees Cookd8537542013-07-03 15:04:57 -07002987 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2988 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002989 if (!hdev->req_workqueue) {
2990 destroy_workqueue(hdev->workqueue);
2991 error = -ENOMEM;
2992 goto err;
2993 }
2994
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002995 if (!IS_ERR_OR_NULL(bt_debugfs))
2996 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2997
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002998 dev_set_name(&hdev->dev, "%s", hdev->name);
2999
3000 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003001 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003002 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003004 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003005 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3006 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003007 if (hdev->rfkill) {
3008 if (rfkill_register(hdev->rfkill) < 0) {
3009 rfkill_destroy(hdev->rfkill);
3010 hdev->rfkill = NULL;
3011 }
3012 }
3013
Johan Hedberg5e130362013-09-13 08:58:17 +03003014 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3015 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3016
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003017 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003018 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003019
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003020 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003021		/* Assume BR/EDR support until proven otherwise (such as
3022		 * through reading supported features during init).
3023		 */
3024 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3025 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003026
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003027 write_lock(&hci_dev_list_lock);
3028 list_add(&hdev->list, &hci_dev_list);
3029 write_unlock(&hci_dev_list_lock);
3030
Marcel Holtmann4a964402014-07-02 19:10:33 +02003031 /* Devices that are marked for raw-only usage are unconfigured
3032 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003033 */
3034 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02003035 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003036
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003038 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039
Johan Hedberg19202572013-01-14 22:33:51 +02003040 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003041
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003043
David Herrmann33ca9542011-10-08 14:58:49 +02003044err_wqueue:
3045 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003046 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003047err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003048 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003049
David Herrmann33ca9542011-10-08 14:58:49 +02003050 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051}
3052EXPORT_SYMBOL(hci_register_dev);
3053
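/*
 * Skeleton of a transport driver hooking into the core (a sketch, not
 * a real driver): hci_register_dev() above rejects the device with
 * -EINVAL unless open, close and send are all set.  The bus value and
 * the empty callbacks are assumptions for illustration.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;	/* a real driver powers up its transport here */
}

static int example_close(struct hci_dev *hdev)
{
	return 0;	/* ... and shuts it down here */
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver hands the frame to hardware */
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* example transport type */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
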
3054/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003055void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056{
Sasha Levin3df92b32012-05-27 22:36:56 +02003057 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003058
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003059 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060
Johan Hovold94324962012-03-15 14:48:41 +01003061 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3062
Sasha Levin3df92b32012-05-27 22:36:56 +02003063 id = hdev->id;
3064
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003065 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003067 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068
3069 hci_dev_do_close(hdev);
3070
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303071 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003072 kfree_skb(hdev->reassembly[i]);
3073
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003074 cancel_work_sync(&hdev->power_on);
3075
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003076 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003077 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3078 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003079 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003080 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003081 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003082 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003083
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003084 /* mgmt_index_removed should take care of emptying the
3085 * pending list */
3086 BUG_ON(!list_empty(&hdev->mgmt_pending));
3087
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 hci_notify(hdev, HCI_DEV_UNREG);
3089
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003090 if (hdev->rfkill) {
3091 rfkill_unregister(hdev->rfkill);
3092 rfkill_destroy(hdev->rfkill);
3093 }
3094
Johan Hedberg711eafe2014-08-08 09:32:52 +03003095 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02003096
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003097 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003098
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003099 debugfs_remove_recursive(hdev->debugfs);
3100
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003101 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003102 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003103
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003104 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003105 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003106 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003107 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003108 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003109 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003110 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003111 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003112 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003113 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003114 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003115 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003116
David Herrmanndc946bd2012-01-07 15:47:24 +01003117 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003118
3119 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120}
3121EXPORT_SYMBOL(hci_unregister_dev);
3122
3123/* Suspend HCI device */
3124int hci_suspend_dev(struct hci_dev *hdev)
3125{
3126 hci_notify(hdev, HCI_DEV_SUSPEND);
3127 return 0;
3128}
3129EXPORT_SYMBOL(hci_suspend_dev);
3130
3131/* Resume HCI device */
3132int hci_resume_dev(struct hci_dev *hdev)
3133{
3134 hci_notify(hdev, HCI_DEV_RESUME);
3135 return 0;
3136}
3137EXPORT_SYMBOL(hci_resume_dev);
3138
Marcel Holtmann75e05692014-11-02 08:15:38 +01003139/* Reset HCI device */
3140int hci_reset_dev(struct hci_dev *hdev)
3141{
3142 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3143 struct sk_buff *skb;
3144
3145 skb = bt_skb_alloc(3, GFP_ATOMIC);
3146 if (!skb)
3147 return -ENOMEM;
3148
3149 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3150 memcpy(skb_put(skb, 3), hw_err, 3);
3151
3152 /* Send Hardware Error to upper stack */
3153 return hci_recv_frame(hdev, skb);
3154}
3155EXPORT_SYMBOL(hci_reset_dev);
3156
Marcel Holtmann76bca882009-11-18 00:40:39 +01003157/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003158int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003159{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003160 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003161 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003162 kfree_skb(skb);
3163 return -ENXIO;
3164 }
3165
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003166 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003167 bt_cb(skb)->incoming = 1;
3168
3169 /* Time stamp */
3170 __net_timestamp(skb);
3171
Marcel Holtmann76bca882009-11-18 00:40:39 +01003172 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003173 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003174
Marcel Holtmann76bca882009-11-18 00:40:39 +01003175 return 0;
3176}
3177EXPORT_SYMBOL(hci_recv_frame);
3178
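/*
 * Sketch of a driver's RX path (illustrative): wrap bytes received
 * from the transport in an skb, tag the packet type and hand the
 * frame to the core.  "buf" and "len" stand in for whatever the
 * hardware delivered; here a complete HCI event is assumed.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
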
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303179static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003180 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303181{
3182 int len = 0;
3183 int hlen = 0;
3184 int remain = count;
3185 struct sk_buff *skb;
3186 struct bt_skb_cb *scb;
3187
3188 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003189 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303190 return -EILSEQ;
3191
3192 skb = hdev->reassembly[index];
3193
3194 if (!skb) {
3195 switch (type) {
3196 case HCI_ACLDATA_PKT:
3197 len = HCI_MAX_FRAME_SIZE;
3198 hlen = HCI_ACL_HDR_SIZE;
3199 break;
3200 case HCI_EVENT_PKT:
3201 len = HCI_MAX_EVENT_SIZE;
3202 hlen = HCI_EVENT_HDR_SIZE;
3203 break;
3204 case HCI_SCODATA_PKT:
3205 len = HCI_MAX_SCO_SIZE;
3206 hlen = HCI_SCO_HDR_SIZE;
3207 break;
3208 }
3209
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003210 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303211 if (!skb)
3212 return -ENOMEM;
3213
3214 scb = (void *) skb->cb;
3215 scb->expect = hlen;
3216 scb->pkt_type = type;
3217
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303218 hdev->reassembly[index] = skb;
3219 }
3220
3221 while (count) {
3222 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003223 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303224
3225 memcpy(skb_put(skb, len), data, len);
3226
3227 count -= len;
3228 data += len;
3229 scb->expect -= len;
3230 remain = count;
3231
3232 switch (type) {
3233 case HCI_EVENT_PKT:
3234 if (skb->len == HCI_EVENT_HDR_SIZE) {
3235 struct hci_event_hdr *h = hci_event_hdr(skb);
3236 scb->expect = h->plen;
3237
3238 if (skb_tailroom(skb) < scb->expect) {
3239 kfree_skb(skb);
3240 hdev->reassembly[index] = NULL;
3241 return -ENOMEM;
3242 }
3243 }
3244 break;
3245
3246 case HCI_ACLDATA_PKT:
3247 if (skb->len == HCI_ACL_HDR_SIZE) {
3248 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3249 scb->expect = __le16_to_cpu(h->dlen);
3250
3251 if (skb_tailroom(skb) < scb->expect) {
3252 kfree_skb(skb);
3253 hdev->reassembly[index] = NULL;
3254 return -ENOMEM;
3255 }
3256 }
3257 break;
3258
3259 case HCI_SCODATA_PKT:
3260 if (skb->len == HCI_SCO_HDR_SIZE) {
3261 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3262 scb->expect = h->dlen;
3263
3264 if (skb_tailroom(skb) < scb->expect) {
3265 kfree_skb(skb);
3266 hdev->reassembly[index] = NULL;
3267 return -ENOMEM;
3268 }
3269 }
3270 break;
3271 }
3272
3273 if (scb->expect == 0) {
3274 /* Complete frame */
3275
3276 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003277 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303278
3279 hdev->reassembly[index] = NULL;
3280 return remain;
3281 }
3282 }
3283
3284 return remain;
3285}
3286
Suraj Sumangala99811512010-07-14 13:02:19 +05303287#define STREAM_REASSEMBLY 0
3288
3289int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3290{
3291 int type;
3292 int rem = 0;
3293
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003294 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303295 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3296
3297 if (!skb) {
3298 struct { char type; } *pkt;
3299
3300 /* Start of the frame */
3301 pkt = data;
3302 type = pkt->type;
3303
3304 data++;
3305 count--;
3306 } else
3307 type = bt_cb(skb)->pkt_type;
3308
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003309 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003310 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303311 if (rem < 0)
3312 return rem;
3313
3314 data += (count - rem);
3315 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003316 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303317
3318 return rem;
3319}
3320EXPORT_SYMBOL(hci_recv_stream_fragment);
3321
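/*
 * Sketch of a UART-style driver feeding an unframed byte stream
 * (illustrative): the core consumes the leading packet-type octet of
 * each frame and reassembles packets across arbitrarily split reads.
 * "buf" and "count" mimic a tty receive buffer.
 */
static void example_uart_rx(struct hci_dev *hdev, u8 *buf, int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, buf, count);
	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}
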
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322/* ---- Interface to upper protocols ---- */
3323
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324int hci_register_cb(struct hci_cb *cb)
3325{
3326 BT_DBG("%p name %s", cb, cb->name);
3327
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003328 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003330 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331
3332 return 0;
3333}
3334EXPORT_SYMBOL(hci_register_cb);
3335
3336int hci_unregister_cb(struct hci_cb *cb)
3337{
3338 BT_DBG("%p name %s", cb, cb->name);
3339
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003340 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003342 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343
3344 return 0;
3345}
3346EXPORT_SYMBOL(hci_unregister_cb);
3347
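/*
 * Sketch of an upper layer hooking into the callback list, the same
 * mechanism L2CAP uses.  Only the security callback is shown; the
 * struct layout is assumed to match hci_core.h of this tree.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* Registered with hci_register_cb(&example_cb) on module init and
 * removed with hci_unregister_cb(&example_cb) on exit.
 */
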
Marcel Holtmann51086992013-10-10 14:54:19 -07003348static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003350 int err;
3351
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003352 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003354 /* Time stamp */
3355 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003357 /* Send copy to monitor */
3358 hci_send_to_monitor(hdev, skb);
3359
3360 if (atomic_read(&hdev->promisc)) {
3361 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003362 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 }
3364
3365	/* Get rid of the skb owner prior to sending to the driver. */
3366 skb_orphan(skb);
3367
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003368 err = hdev->send(hdev, skb);
3369 if (err < 0) {
3370 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3371 kfree_skb(skb);
3372 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373}
3374
Marcel Holtmann899de762014-07-11 05:51:58 +02003375bool hci_req_pending(struct hci_dev *hdev)
3376{
3377 return (hdev->req_status == HCI_REQ_PEND);
3378}
3379
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003380/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003381int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3382 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003383{
3384 struct sk_buff *skb;
3385
3386 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3387
3388 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3389 if (!skb) {
3390 BT_ERR("%s no memory for command", hdev->name);
3391 return -ENOMEM;
3392 }
3393
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003394 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003395 * single-command requests.
3396 */
3397 bt_cb(skb)->req.start = true;
3398
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003400 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401
3402 return 0;
3403}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404
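/*
 * Usage sketch: queue a parameterless Read Local Version Information
 * command.  hci_send_cmd() only queues the packet; the controller's
 * reply arrives asynchronously through the event path, not as a
 * return value here.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
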
3405/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003406void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407{
3408 struct hci_command_hdr *hdr;
3409
3410 if (!hdev->sent_cmd)
3411 return NULL;
3412
3413 hdr = (void *) hdev->sent_cmd->data;
3414
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003415 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 return NULL;
3417
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003418 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
3420 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3421}
3422
3423/* Send ACL data */
3424static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3425{
3426 struct hci_acl_hdr *hdr;
3427 int len = skb->len;
3428
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003429 skb_push(skb, HCI_ACL_HDR_SIZE);
3430 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003431 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003432 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3433 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434}
3435
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003436static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003437 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003439 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440 struct hci_dev *hdev = conn->hdev;
3441 struct sk_buff *list;
3442
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003443 skb->len = skb_headlen(skb);
3444 skb->data_len = 0;
3445
3446 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003447
3448 switch (hdev->dev_type) {
3449 case HCI_BREDR:
3450 hci_add_acl_hdr(skb, conn->handle, flags);
3451 break;
3452 case HCI_AMP:
3453 hci_add_acl_hdr(skb, chan->handle, flags);
3454 break;
3455 default:
3456 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3457 return;
3458 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003459
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003460 list = skb_shinfo(skb)->frag_list;
3461 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462		/* Non-fragmented */
3463 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3464
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003465 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 } else {
3467 /* Fragmented */
3468 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3469
3470 skb_shinfo(skb)->frag_list = NULL;
3471
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003472		/* Queue all fragments atomically. spin_lock_bh is needed here
3473		 * because with 6LoWPAN links this function can be called from
3474		 * softirq context, where taking a normal spin lock could
3475		 * deadlock.
3476 */
3477 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003479 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003480
3481 flags &= ~ACL_START;
3482 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 do {
3484 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003485
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003486 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003487 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
3489 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3490
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003491 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 } while (list);
3493
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003494 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003496}
3497
3498void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3499{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003500 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003501
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003502 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003503
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003504 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003506 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508
3509/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003510void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511{
3512 struct hci_dev *hdev = conn->hdev;
3513 struct hci_sco_hdr hdr;
3514
3515 BT_DBG("%s len %d", hdev->name, skb->len);
3516
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003517 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 hdr.dlen = skb->len;
3519
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003520 skb_push(skb, HCI_SCO_HDR_SIZE);
3521 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003522 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003524 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003525
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003527 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529
3530/* ---- HCI TX task (outgoing data) ---- */
3531
3532/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003533static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3534 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535{
3536 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003537 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003538 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003540	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003542
3543 rcu_read_lock();
3544
3545 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003546 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003548
3549 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3550 continue;
3551
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 num++;
3553
3554 if (c->sent < min) {
3555 min = c->sent;
3556 conn = c;
3557 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003558
3559 if (hci_conn_num(hdev, type) == num)
3560 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 }
3562
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003563 rcu_read_unlock();
3564
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003566 int cnt, q;
3567
3568 switch (conn->type) {
3569 case ACL_LINK:
3570 cnt = hdev->acl_cnt;
3571 break;
3572 case SCO_LINK:
3573 case ESCO_LINK:
3574 cnt = hdev->sco_cnt;
3575 break;
3576 case LE_LINK:
3577 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3578 break;
3579 default:
3580 cnt = 0;
3581 BT_ERR("Unknown link type");
3582 }
3583
3584 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 *quote = q ? q : 1;
3586 } else
3587 *quote = 0;
3588
3589 BT_DBG("conn %p quote %d", conn, *quote);
3590 return conn;
3591}
3592
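/*
 * Worked example of the quota computed above: with cnt == 8 free ACL
 * credits and num == 3 connections holding queued data, the picked
 * connection gets q = 8 / 3 = 2 packets for this scheduling pass; the
 * "q ? q : 1" clamp guarantees forward progress once credits drop
 * below the number of busy connections.
 */
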
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003593static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594{
3595 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003596 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597
Ville Tervobae1f5d92011-02-10 22:38:53 -03003598 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003600 rcu_read_lock();
3601
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003603 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003604 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003605 BT_ERR("%s killing stalled connection %pMR",
3606 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003607 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 }
3609 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003610
3611 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612}
3613
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003614static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3615 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003616{
3617 struct hci_conn_hash *h = &hdev->conn_hash;
3618 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003619 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003620 struct hci_conn *conn;
3621 int cnt, q, conn_num = 0;
3622
3623 BT_DBG("%s", hdev->name);
3624
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003625 rcu_read_lock();
3626
3627 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003628 struct hci_chan *tmp;
3629
3630 if (conn->type != type)
3631 continue;
3632
3633 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3634 continue;
3635
3636 conn_num++;
3637
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003638 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003639 struct sk_buff *skb;
3640
3641 if (skb_queue_empty(&tmp->data_q))
3642 continue;
3643
3644 skb = skb_peek(&tmp->data_q);
3645 if (skb->priority < cur_prio)
3646 continue;
3647
3648 if (skb->priority > cur_prio) {
3649 num = 0;
3650 min = ~0;
3651 cur_prio = skb->priority;
3652 }
3653
3654 num++;
3655
3656 if (conn->sent < min) {
3657 min = conn->sent;
3658 chan = tmp;
3659 }
3660 }
3661
3662 if (hci_conn_num(hdev, type) == conn_num)
3663 break;
3664 }
3665
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003666 rcu_read_unlock();
3667
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003668 if (!chan)
3669 return NULL;
3670
3671 switch (chan->conn->type) {
3672 case ACL_LINK:
3673 cnt = hdev->acl_cnt;
3674 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003675 case AMP_LINK:
3676 cnt = hdev->block_cnt;
3677 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003678 case SCO_LINK:
3679 case ESCO_LINK:
3680 cnt = hdev->sco_cnt;
3681 break;
3682 case LE_LINK:
3683 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3684 break;
3685 default:
3686 cnt = 0;
3687 BT_ERR("Unknown link type");
3688 }
3689
3690 q = cnt / num;
3691 *quote = q ? q : 1;
3692 BT_DBG("chan %p quote %d", chan, *quote);
3693 return chan;
3694}
3695
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003696static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3697{
3698 struct hci_conn_hash *h = &hdev->conn_hash;
3699 struct hci_conn *conn;
3700 int num = 0;
3701
3702 BT_DBG("%s", hdev->name);
3703
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003704 rcu_read_lock();
3705
3706 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003707 struct hci_chan *chan;
3708
3709 if (conn->type != type)
3710 continue;
3711
3712 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3713 continue;
3714
3715 num++;
3716
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003717 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003718 struct sk_buff *skb;
3719
3720 if (chan->sent) {
3721 chan->sent = 0;
3722 continue;
3723 }
3724
3725 if (skb_queue_empty(&chan->data_q))
3726 continue;
3727
3728 skb = skb_peek(&chan->data_q);
3729 if (skb->priority >= HCI_PRIO_MAX - 1)
3730 continue;
3731
3732 skb->priority = HCI_PRIO_MAX - 1;
3733
3734 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003735 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003736 }
3737
3738 if (hci_conn_num(hdev, type) == num)
3739 break;
3740 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003741
3742 rcu_read_unlock();
3743
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003744}
3745
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003746static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3747{
3748 /* Calculate count of blocks used by this packet */
3749 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3750}
3751
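/*
 * Worked example: with hdev->block_len == 64 and a 339-byte ACL frame
 * (4-byte ACL header plus 335 bytes of payload), __get_blocks()
 * returns DIV_ROUND_UP(335, 64) == 6 data blocks.
 */
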
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003752static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753{
Marcel Holtmann4a964402014-07-02 19:10:33 +02003754 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755		/* ACL tx timeout must be longer than the maximum
3756		 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003757 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003758 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003759 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003761}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003763static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003764{
3765 unsigned int cnt = hdev->acl_cnt;
3766 struct hci_chan *chan;
3767 struct sk_buff *skb;
3768 int quote;
3769
3770 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003771
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003772 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003773 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003774 u32 priority = (skb_peek(&chan->data_q))->priority;
3775 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003776 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003777 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003778
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003779 /* Stop if priority has changed */
3780 if (skb->priority < priority)
3781 break;
3782
3783 skb = skb_dequeue(&chan->data_q);
3784
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003785 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003786 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003787
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003788 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 hdev->acl_last_tx = jiffies;
3790
3791 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003792 chan->sent++;
3793 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794 }
3795 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003796
3797 if (cnt != hdev->acl_cnt)
3798 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799}
3800
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

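/* Top-level ACL scheduler: dispatches to the packet- or block-based
 * strategy according to hdev->flow_ctl_mode. The early returns keep a
 * plain BR/EDR controller from being scheduled when only AMP links
 * exist and vice versa.
 */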
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

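/* SCO and eSCO both draw on hdev->sco_cnt, the controller's single
 * pool of synchronous buffers. Synchronous frames are strictly
 * rate-limited by that count, so neither scheduler below uses
 * priority queues or a tx timeout.
 */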
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

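/* hci_low_sent() (defined earlier in this file) selects the connection
 * of the given type with the lowest sent count, giving rough fairness
 * between synchronous links; the wrap back to 0 at ~0 keeps that
 * comparison meaningful over long-lived connections.
 */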
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

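/* LE scheduling mirrors the packet-based ACL path with two twists:
 * controllers that report no dedicated LE buffers (hdev->le_pkts == 0)
 * borrow from the ACL quota, and the tx timeout check is skipped while
 * the controller is still unconfigured. The 45 second timeout below
 * deliberately exceeds the maximum LE link supervision timeout of
 * 40.9 seconds.
 */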
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

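/* Worker for hdev->tx_work: drains the per-type schedulers in a fixed
 * order (ACL, SCO, eSCO, LE), then flushes the raw queue. In user
 * channel mode only the raw queue is touched, since a user space stack
 * owns flow control for the device.
 */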
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

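/* The 16-bit handle field of an inbound ACL header carries the
 * connection handle in its low 12 bits and the packet boundary and
 * broadcast flags in the top 4; hci_handle() and hci_flags() (from
 * <net/bluetooth/hci.h>) split them apart. A minimal sketch of that
 * unpacking:
 *
 *	handle = __le16_to_cpu(hdr->handle);
 *	flags  = handle >> 12;
 *	handle &= 0x0fff;
 */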
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

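/* SCO receive mirrors the ACL path minus the flags and active-mode
 * handling: the SCO header carries only a connection handle and
 * length.
 */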
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

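/* A request is a batch of commands queued together; the first skb of
 * each batch has bt_cb(skb)->req.start set. The current request is
 * therefore complete once the head of cmd_q is such a start marker,
 * or the queue is empty.
 */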
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

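/* Requeue a clone of the last sent command at the head of cmd_q.
 * HCI_OP_RESET is never resent here: the CSR quirk this path works
 * around (see hci_req_cmd_complete() below) is itself a spurious
 * reset complete event.
 */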
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

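/* Called from the event path when a command complete/status event
 * arrives. On failure, or when the finished command was the last one
 * of its request, any remaining queued commands of that request are
 * dropped and the request's completion callback runs exactly once.
 */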
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}

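/* Worker for hdev->rx_work: every inbound frame is first copied to the
 * monitor socket and, in promiscuous mode, to raw HCI sockets, before
 * being dispatched by packet type. Frames are dropped entirely while a
 * user channel owns the device, and data packets are dropped while
 * HCI_INIT is set.
 */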
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

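/* Worker for hdev->cmd_work: sends at most one queued command per run
 * while cmd_cnt permits, keeping a clone in hdev->sent_cmd for the
 * completion matching above. The command timer is rearmed so an
 * unresponsive controller is detected, except during reset, where it
 * is cancelled instead. A failed clone requeues the command and
 * reschedules the worker.
 */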
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}