blob: ebac859e1258bbc85469c3c20e47aeedcd3265ac [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
Marcel Holtmannb78752c2010-08-08 23:06:53 -040044static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020045static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020046static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048/* HCI device list */
49LIST_HEAD(hci_dev_list);
50DEFINE_RWLOCK(hci_dev_list_lock);
51
52/* HCI callback list */
53LIST_HEAD(hci_cb_list);
54DEFINE_RWLOCK(hci_cb_list_lock);
55
Sasha Levin3df92b32012-05-27 22:36:56 +020056/* HCI ID Numbering */
57static DEFINE_IDA(hci_index_ida);
58
Marcel Holtmann899de762014-07-11 05:51:58 +020059/* ----- HCI requests ----- */
60
61#define HCI_REQ_DONE 0
62#define HCI_REQ_PEND 1
63#define HCI_REQ_CANCELED 2
64
65#define hci_req_lock(d) mutex_lock(&d->req_lock)
66#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device-level event to the HCI socket layer so that
 * monitoring sockets are informed of device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070075/* ---- HCI debugfs entries ---- */
76
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
Marcel Holtmann111902f2014-06-21 04:53:17 +020083 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070084 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
/* debugfs write handler for the "dut_mode" entry.
 *
 * Accepts a boolean string ("0"/"1", "y"/"n", ...) and synchronously
 * enables or disables Device Under Test mode on the controller. The
 * flag in dbg_flags is only toggled after the controller confirmed the
 * command with a success status.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	/* Leave room for the terminating NUL appended below */
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* DUT mode can only be changed while the device is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* No-op writes are rejected so callers notice redundant toggles */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	/* Serialize against other synchronous HCI requests. Disabling is
	 * done via HCI Reset since there is no explicit "exit DUT" command.
	 */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete parameters is the status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}
134
/* File operations for the debugfs "dut_mode" entry; simple_open()
 * stashes the hci_dev pointer in file->private_data for the handlers.
 */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
141
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142/* ---- HCI requests ---- */
143
Johan Hedberg42c6b122013-03-05 20:37:49 +0200144static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200146 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
148 if (hdev->req_status == HCI_REQ_PEND) {
149 hdev->req_result = result;
150 hdev->req_status = HCI_REQ_DONE;
151 wake_up_interruptible(&hdev->req_wait_q);
152 }
153}
154
155static void hci_req_cancel(struct hci_dev *hdev, int err)
156{
157 BT_DBG("%s err 0x%2.2x", hdev->name, err);
158
159 if (hdev->req_status == HCI_REQ_PEND) {
160 hdev->req_result = err;
161 hdev->req_status = HCI_REQ_CANCELED;
162 wake_up_interruptible(&hdev->req_wait_q);
163 }
164}
165
/* Take ownership of the last received event (hdev->recv_evt) and
 * validate it against the expected opcode/event.
 *
 * If @event is non-zero, any event of that type is accepted. Otherwise
 * a Command Complete event matching @opcode is required. On success the
 * skb is returned with its header(s) pulled; on any mismatch or short
 * packet the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach the stored event under the device lock so no other
	 * context can free or consume it concurrently.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event type rather than a
	 * Command Complete for the opcode.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
220
/* Send a single HCI command and wait (up to @timeout jiffies) for its
 * completion, returning the resulting event skb.
 *
 * If @event is non-zero, completion is signalled by that specific event
 * instead of Command Complete. Must be called with hdev->req_lock held
 * by the caller (see __hci_cmd_sync users). Returns an ERR_PTR on
 * failure, timeout or signal.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* running the request so the
	 * completion cannot be missed between hci_req_run() and sleeping.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
277
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case of
 * waiting for the Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
284
/* Execute request and wait for completion.
 *
 * @func builds the request (possibly queueing several commands); this
 * helper then runs it and sleeps up to @timeout jiffies until
 * hci_req_sync_complete() fires. Callers must hold hdev->req_lock
 * (see hci_req_sync()). Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	/* Queue ourselves before running the request so the wake-up from
	 * the completion callback cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Request still pending: controller did not answer in time */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
351
Johan Hedberg01178cd2013-03-05 20:37:41 +0200352static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200353 void (*req)(struct hci_request *req,
354 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200355 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356{
357 int ret;
358
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200359 if (!test_bit(HCI_UP, &hdev->flags))
360 return -ENETDOWN;
361
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362 /* Serialize all requests */
363 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200364 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 hci_req_unlock(hdev);
366
367 return ret;
368}
369
Johan Hedberg42c6b122013-03-05 20:37:49 +0200370static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200372 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200375 set_bit(HCI_RESET, &req->hdev->flags);
376 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377}
378
Johan Hedberg42c6b122013-03-05 20:37:49 +0200379static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200381 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200382
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200384 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200386 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200387 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200388
389 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200390 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391}
392
Johan Hedberg42c6b122013-03-05 20:37:49 +0200393static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200394{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200395 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200396
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200397 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200398 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300399
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700400 /* Read Local Supported Commands */
401 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
402
403 /* Read Local Supported Features */
404 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
405
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300406 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200407 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300408
409 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200410 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700411
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700412 /* Read Flow Control Mode */
413 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
414
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700415 /* Read Location Data */
416 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200417}
418
Johan Hedberg42c6b122013-03-05 20:37:49 +0200419static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200420{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200421 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200422
423 BT_DBG("%s %ld", hdev->name, opt);
424
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300425 /* Reset */
426 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200427 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300428
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200429 switch (hdev->dev_type) {
430 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200431 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200432 break;
433
434 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200435 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200436 break;
437
438 default:
439 BT_ERR("Unknown device type %d", hdev->dev_type);
440 break;
441 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200442}
443
Johan Hedberg42c6b122013-03-05 20:37:49 +0200444static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200445{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200446 __le16 param;
447 __u8 flt_type;
448
449 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200450 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200451
452 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200453 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200454
455 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200456 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200457
458 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200459 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200460
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700461 /* Read Number of Supported IAC */
462 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
463
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700464 /* Read Current IAC LAP */
465 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
466
Johan Hedberg2177bab2013-03-05 20:37:43 +0200467 /* Clear Event Filters */
468 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200469 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200470
471 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700472 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200473 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200474}
475
/* Stage-2 setup for LE capable controllers: read the LE parameters,
 * reset the white list and implicitly enable LE on single-mode parts.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
499
/* Build the page-1 HCI event mask from the controller's LMP feature
 * bits, so only events the controller can actually generate (and the
 * host cares about) are unmasked.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
576
/* Stage-2 init request: run the BR/EDR and/or LE common setup and queue
 * the feature-conditional configuration commands (SSP, EIR, inquiry
 * mode, extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP supported but not enabled: make sure any
			 * stale EIR data is cleared on the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
653
Johan Hedberg42c6b122013-03-05 20:37:49 +0200654static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200655{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200656 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200657 struct hci_cp_write_def_link_policy cp;
658 u16 link_policy = 0;
659
660 if (lmp_rswitch_capable(hdev))
661 link_policy |= HCI_LP_RSWITCH;
662 if (lmp_hold_capable(hdev))
663 link_policy |= HCI_LP_HOLD;
664 if (lmp_sniff_capable(hdev))
665 link_policy |= HCI_LP_SNIFF;
666 if (lmp_park_capable(hdev))
667 link_policy |= HCI_LP_PARK;
668
669 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200670 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200671}
672
Johan Hedberg42c6b122013-03-05 20:37:49 +0200673static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200674{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200675 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200676 struct hci_cp_write_le_host_supported cp;
677
Johan Hedbergc73eee92013-04-19 18:35:21 +0300678 /* LE-only devices do not support explicit enablement */
679 if (!lmp_bredr_capable(hdev))
680 return;
681
Johan Hedberg2177bab2013-03-05 20:37:43 +0200682 memset(&cp, 0, sizeof(cp));
683
684 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
685 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200686 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200687 }
688
689 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200690 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
691 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200692}
693
/* Build the page-2 HCI event mask based on Connectionless Slave
 * Broadcast and Secure Connections ping support.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	/* All page-2 events start masked off */
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
725
/* Third stage init: set up the event mask, queue optional commands whose
 * support is indicated in hdev->commands[] and, for LE capable
 * controllers, configure the LE event mask and read LE parameters.
 * Finishes by reading all extended feature pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Delete all stored link keys on the controller */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy Settings, if supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	/* Read Page Scan Activity, if supported */
	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;	/* default LE meta events (bits 0-3) */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
842
/* Fourth stage init: queue commands whose support is indicated either
 * in hdev->commands[] or by LMP feature bits, and enable Secure
 * Connections when supported and configured.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
870
/* Run the full controller initialization: stage 1 for all controller
 * types, stages 2-4 for BR/EDR(/LE) controllers only. When the device
 * is in setup or config phase, also create the debugfs entries and
 * register the SMP channels.
 *
 * Returns 0 on success or the negative error of the failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		hci_debugfs_create_le(hdev);
		smp_register(hdev);
	}

	return 0;
}
934
/* Minimal init request used for unconfigured controllers: optional
 * reset, read the local version information and - only when the driver
 * can change the address - read the public BD address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
952
953static int __hci_unconf_init(struct hci_dev *hdev)
954{
955 int err;
956
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200957 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
958 return 0;
959
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200960 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
961 if (err < 0)
962 return err;
963
964 return 0;
965}
966
Johan Hedberg42c6b122013-03-05 20:37:49 +0200967static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968{
969 __u8 scan = opt;
970
Johan Hedberg42c6b122013-03-05 20:37:49 +0200971 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972
973 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200974 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975}
976
Johan Hedberg42c6b122013-03-05 20:37:49 +0200977static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978{
979 __u8 auth = opt;
980
Johan Hedberg42c6b122013-03-05 20:37:49 +0200981 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982
983 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200984 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985}
986
Johan Hedberg42c6b122013-03-05 20:37:49 +0200987static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988{
989 __u8 encrypt = opt;
990
Johan Hedberg42c6b122013-03-05 20:37:49 +0200991 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200993 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200994 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995}
996
Johan Hedberg42c6b122013-03-05 20:37:49 +0200997static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200998{
999 __le16 policy = cpu_to_le16(opt);
1000
Johan Hedberg42c6b122013-03-05 20:37:49 +02001001 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001002
1003 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001004 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001005}
1006
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001007/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 * Device is held on return. */
1009struct hci_dev *hci_dev_get(int index)
1010{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001011 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
1013 BT_DBG("%d", index);
1014
1015 if (index < 0)
1016 return NULL;
1017
1018 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001019 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 if (d->id == index) {
1021 hdev = hci_dev_hold(d);
1022 break;
1023 }
1024 }
1025 read_unlock(&hci_dev_list_lock);
1026 return hdev;
1027}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028
1029/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001030
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001031bool hci_discovery_active(struct hci_dev *hdev)
1032{
1033 struct discovery_state *discov = &hdev->discovery;
1034
Andre Guedes6fbe1952012-02-03 17:47:58 -03001035 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001036 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001037 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001038 return true;
1039
Andre Guedes6fbe1952012-02-03 17:47:58 -03001040 default:
1041 return false;
1042 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001043}
1044
Johan Hedbergff9ef572012-01-04 14:23:45 +02001045void hci_discovery_set_state(struct hci_dev *hdev, int state)
1046{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001047 int old_state = hdev->discovery.state;
1048
Johan Hedbergff9ef572012-01-04 14:23:45 +02001049 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1050
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001051 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001052 return;
1053
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001054 hdev->discovery.state = state;
1055
Johan Hedbergff9ef572012-01-04 14:23:45 +02001056 switch (state) {
1057 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001058 hci_update_background_scan(hdev);
1059
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001060 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001061 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001062 break;
1063 case DISCOVERY_STARTING:
1064 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001065 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001066 mgmt_discovering(hdev, 1);
1067 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001068 case DISCOVERY_RESOLVING:
1069 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001070 case DISCOVERY_STOPPING:
1071 break;
1072 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001073}
1074
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001075void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076{
Johan Hedberg30883512012-01-04 14:16:21 +02001077 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001078 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079
Johan Hedberg561aafb2012-01-04 13:31:59 +02001080 list_for_each_entry_safe(p, n, &cache->all, all) {
1081 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001082 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001084
1085 INIT_LIST_HEAD(&cache->unknown);
1086 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087}
1088
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001089struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1090 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091{
Johan Hedberg30883512012-01-04 14:16:21 +02001092 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 struct inquiry_entry *e;
1094
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001095 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096
Johan Hedberg561aafb2012-01-04 13:31:59 +02001097 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001099 return e;
1100 }
1101
1102 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103}
1104
Johan Hedberg561aafb2012-01-04 13:31:59 +02001105struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001106 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001107{
Johan Hedberg30883512012-01-04 14:16:21 +02001108 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001109 struct inquiry_entry *e;
1110
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001111 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001112
1113 list_for_each_entry(e, &cache->unknown, list) {
1114 if (!bacmp(&e->data.bdaddr, bdaddr))
1115 return e;
1116 }
1117
1118 return NULL;
1119}
1120
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001121struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001122 bdaddr_t *bdaddr,
1123 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001124{
1125 struct discovery_state *cache = &hdev->discovery;
1126 struct inquiry_entry *e;
1127
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001128 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001129
1130 list_for_each_entry(e, &cache->resolve, list) {
1131 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1132 return e;
1133 if (!bacmp(&e->data.bdaddr, bdaddr))
1134 return e;
1135 }
1136
1137 return NULL;
1138}
1139
Johan Hedberga3d4e202012-01-09 00:53:02 +02001140void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001141 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001142{
1143 struct discovery_state *cache = &hdev->discovery;
1144 struct list_head *pos = &cache->resolve;
1145 struct inquiry_entry *p;
1146
1147 list_del(&ie->list);
1148
1149 list_for_each_entry(p, &cache->resolve, list) {
1150 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001151 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001152 break;
1153 pos = &p->list;
1154 }
1155
1156 list_add(&ie->list, pos);
1157}
1158
/* Add or refresh the inquiry cache entry for the device described by
 * @data.
 *
 * Returns a bit mask of MGMT_DEV_FOUND_* flags for the caller:
 * MGMT_DEV_FOUND_LEGACY_PAIRING when SSP is not in use (per @data or
 * the cached entry) and MGMT_DEV_FOUND_CONFIRM_NAME when the remote
 * name is still unknown or a new entry could not be allocated.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* If name resolution is still needed, a changed RSSI
		 * re-sorts the entry on the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry to NAME_KNOWN (and take it off the unknown
	 * list) once the name has been learned, unless resolution is
	 * already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1220
1221static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1222{
Johan Hedberg30883512012-01-04 14:16:21 +02001223 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 struct inquiry_info *info = (struct inquiry_info *) buf;
1225 struct inquiry_entry *e;
1226 int copied = 0;
1227
Johan Hedberg561aafb2012-01-04 13:31:59 +02001228 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001230
1231 if (copied >= num)
1232 break;
1233
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 bacpy(&info->bdaddr, &data->bdaddr);
1235 info->pscan_rep_mode = data->pscan_rep_mode;
1236 info->pscan_period_mode = data->pscan_period_mode;
1237 info->pscan_mode = data->pscan_mode;
1238 memcpy(info->dev_class, data->dev_class, 3);
1239 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001240
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001242 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 }
1244
1245 BT_DBG("cache %p, copied %d", cache, copied);
1246 return copied;
1247}
1248
/* HCI request callback: queue an Inquiry command built from the
 * struct hci_inquiry_req passed (as a pointer) via @opt. Does nothing
 * if an inquiry is already in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry with the caller-supplied LAP, length and
	 * response limit.
	 */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1266
1267int hci_inquiry(void __user *arg)
1268{
1269 __u8 __user *ptr = arg;
1270 struct hci_inquiry_req ir;
1271 struct hci_dev *hdev;
1272 int err = 0, do_inquiry = 0, max_rsp;
1273 long timeo;
1274 __u8 *buf;
1275
1276 if (copy_from_user(&ir, ptr, sizeof(ir)))
1277 return -EFAULT;
1278
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001279 hdev = hci_dev_get(ir.dev_id);
1280 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 return -ENODEV;
1282
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001283 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1284 err = -EBUSY;
1285 goto done;
1286 }
1287
Marcel Holtmann4a964402014-07-02 19:10:33 +02001288 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001289 err = -EOPNOTSUPP;
1290 goto done;
1291 }
1292
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001293 if (hdev->dev_type != HCI_BREDR) {
1294 err = -EOPNOTSUPP;
1295 goto done;
1296 }
1297
Johan Hedberg56f87902013-10-02 13:43:13 +03001298 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1299 err = -EOPNOTSUPP;
1300 goto done;
1301 }
1302
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001303 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001304 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001305 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001306 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 do_inquiry = 1;
1308 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001309 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310
Marcel Holtmann04837f62006-07-03 10:02:33 +02001311 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001312
1313 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001314 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1315 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001316 if (err < 0)
1317 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001318
1319 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1320 * cleared). If it is interrupted by a signal, return -EINTR.
1321 */
NeilBrown74316202014-07-07 15:16:04 +10001322 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001323 TASK_INTERRUPTIBLE))
1324 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001325 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001327 /* for unlimited number of responses we will use buffer with
1328 * 255 entries
1329 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1331
1332 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1333 * copy it to the user space.
1334 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001335 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001336 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 err = -ENOMEM;
1338 goto done;
1339 }
1340
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001341 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001343 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344
1345 BT_DBG("num_rsp %d", ir.num_rsp);
1346
1347 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1348 ptr += sizeof(ir);
1349 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001350 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001352 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 err = -EFAULT;
1354
1355 kfree(buf);
1356
1357done:
1358 hci_dev_put(hdev);
1359 return err;
1360}
1361
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001362static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 int ret = 0;
1365
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 BT_DBG("%s %p", hdev->name, hdev);
1367
1368 hci_req_lock(hdev);
1369
Johan Hovold94324962012-03-15 14:48:41 +01001370 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1371 ret = -ENODEV;
1372 goto done;
1373 }
1374
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02001375 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1376 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001377 /* Check for rfkill but allow the HCI setup stage to
1378 * proceed (which in itself doesn't cause any RF activity).
1379 */
1380 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1381 ret = -ERFKILL;
1382 goto done;
1383 }
1384
1385 /* Check for valid public address or a configured static
1386 * random adddress, but let the HCI setup proceed to
1387 * be able to determine if there is a public address
1388 * or not.
1389 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001390 * In case of user channel usage, it is not important
1391 * if a public address or static random address is
1392 * available.
1393 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001394 * This check is only valid for BR/EDR controllers
1395 * since AMP controllers do not have an address.
1396 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001397 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1398 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001399 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1400 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1401 ret = -EADDRNOTAVAIL;
1402 goto done;
1403 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001404 }
1405
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 if (test_bit(HCI_UP, &hdev->flags)) {
1407 ret = -EALREADY;
1408 goto done;
1409 }
1410
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 if (hdev->open(hdev)) {
1412 ret = -EIO;
1413 goto done;
1414 }
1415
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001416 atomic_set(&hdev->cmd_cnt, 1);
1417 set_bit(HCI_INIT, &hdev->flags);
1418
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001419 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1420 if (hdev->setup)
1421 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001422
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001423 /* The transport driver can set these quirks before
1424 * creating the HCI device or in its setup callback.
1425 *
1426 * In case any of them is set, the controller has to
1427 * start up as unconfigured.
1428 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001429 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1430 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001431 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001432
1433 /* For an unconfigured controller it is required to
1434 * read at least the version information provided by
1435 * the Read Local Version Information command.
1436 *
1437 * If the set_bdaddr driver callback is provided, then
1438 * also the original Bluetooth public device address
1439 * will be read using the Read BD Address command.
1440 */
1441 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1442 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001443 }
1444
Marcel Holtmann9713c172014-07-06 12:11:15 +02001445 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1446 /* If public address change is configured, ensure that
1447 * the address gets programmed. If the driver does not
1448 * support changing the public address, fail the power
1449 * on procedure.
1450 */
1451 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1452 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001453 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1454 else
1455 ret = -EADDRNOTAVAIL;
1456 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001457
1458 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02001459 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001460 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001461 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 }
1463
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001464 clear_bit(HCI_INIT, &hdev->flags);
1465
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 if (!ret) {
1467 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02001468 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 set_bit(HCI_UP, &hdev->flags);
1470 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001471 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02001472 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02001473 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001474 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001475 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001476 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001477 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001478 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001479 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001480 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001482 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001483 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001484 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
1486 skb_queue_purge(&hdev->cmd_q);
1487 skb_queue_purge(&hdev->rx_q);
1488
1489 if (hdev->flush)
1490 hdev->flush(hdev);
1491
1492 if (hdev->sent_cmd) {
1493 kfree_skb(hdev->sent_cmd);
1494 hdev->sent_cmd = NULL;
1495 }
1496
1497 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001498 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 }
1500
1501done:
1502 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 return ret;
1504}
1505
/* ---- HCI ioctl helpers ---- */

/* Power on a controller on behalf of the HCIDEVUP ioctl (or the user
 * channel open path, which sets HCI_USER_CHANNEL beforehand).
 *
 * Returns 0 on success, -ENODEV if the index does not exist,
 * -EOPNOTSUPP for unconfigured controllers outside of user channel
 * operation, or the error from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	/* Takes a reference on the device; dropped at done. */
	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1562
/* Drop all pending LE connection actions during power-down.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Release the connection reference held by this params
		 * entry, if a connection is attached to it.
		 */
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		/* Unlink from the pending-action list; the params entry
		 * itself stays on hdev->le_conn_params.
		 */
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
1579
/* Common power-down path, shared by the HCIDEVDOWN ioctl, rfkill and
 * the delayed auto power-off work.
 *
 * Cancels all pending work, flushes queues and connection state, calls
 * the driver's close callback and leaves the controller in a clean,
 * powered-off state. Returns 0, also when the device was already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* Any pending delayed auto power-off is now redundant. */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* If the device is already down there is nothing to tear down;
	 * just make sure no command timeout fires afterwards.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the related
	 * flags so the device does not come back up discoverable.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* The RPA expiry work is only scheduled when mgmt is in use. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	/* Announce the power-down via mgmt unless this is an automatic
	 * power-off; only BR/EDR controllers report it.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1685
1686int hci_dev_close(__u16 dev)
1687{
1688 struct hci_dev *hdev;
1689 int err;
1690
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001691 hdev = hci_dev_get(dev);
1692 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001694
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001695 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1696 err = -EBUSY;
1697 goto done;
1698 }
1699
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001700 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1701 cancel_delayed_work(&hdev->power_off);
1702
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001704
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001705done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 hci_dev_put(hdev);
1707 return err;
1708}
1709
/* Handle the HCIDEVRESET ioctl: issue an HCI Reset to an up-and-running
 * controller after dropping all queued traffic and cached state.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN when
 * the device is not up, -EBUSY for user channel devices, -EOPNOTSUPP
 * for unconfigured controllers, or the __hci_req_sync() error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing with fresh flow-control counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1763
1764int hci_dev_reset_stat(__u16 dev)
1765{
1766 struct hci_dev *hdev;
1767 int ret = 0;
1768
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001769 hdev = hci_dev_get(dev);
1770 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 return -ENODEV;
1772
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001773 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1774 ret = -EBUSY;
1775 goto done;
1776 }
1777
Marcel Holtmann4a964402014-07-02 19:10:33 +02001778 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001779 ret = -EOPNOTSUPP;
1780 goto done;
1781 }
1782
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1784
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001785done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 return ret;
1788}
1789
/* Mirror a scan-enable change made through the legacy HCISETSCAN ioctl
 * into the mgmt-visible HCI_CONNECTABLE/HCI_DISCOVERABLE flags, and
 * notify mgmt listeners when any setting actually changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test_and_set/test_and_clear report the previous state, so
	 * each *_changed is true only on an actual transition.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot survive without inquiry
		 * scan being enabled.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	/* Without mgmt in use there is nobody to notify. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
1825
/* Handle device-configuration ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU,
 * HCISETSCOMTU).
 *
 * @cmd: the ioctl number
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno. These legacy ioctls are
 * limited to configured BR/EDR controllers that are not claimed by a
 * user channel socket.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values: the low
	 * half-word is the packet count, the high half-word the MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1927
/* Handle the HCIGETDEVLIST ioctl: copy the list of registered
 * controller ids and their flags to userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_list_req; its dev_num
 *       field limits how many entries the caller can accept.
 *
 * Returns 0 on success, -EFAULT on a user-copy failure, -EINVAL for an
 * out-of-range dev_num and -ENOMEM when the bounce buffer allocation
 * fails.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back as much as was actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1977
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for one
 * controller and copy it to userspace.
 *
 * Returns 0 on success, -EFAULT on a user-copy failure and -ENODEV for
 * an unknown device index.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type (next two bits). */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields and have no SCO transport.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2030
2031/* ---- Interface to HCI drivers ---- */
2032
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002033static int hci_rfkill_set_block(void *data, bool blocked)
2034{
2035 struct hci_dev *hdev = data;
2036
2037 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2038
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002039 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2040 return -EBUSY;
2041
Johan Hedberg5e130362013-09-13 08:58:17 +03002042 if (blocked) {
2043 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002044 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2045 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002046 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002047 } else {
2048 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002049 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002050
2051 return 0;
2052}
2053
/* rfkill operations for HCI controllers; only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2057
/* Deferred power-on handler, scheduled on hdev->power_on. Runs the
 * open sequence and performs the post-setup checks and mgmt
 * notifications that the setup phase deferred.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto power-off: schedule the delayed shutdown. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2118
2119static void hci_power_off(struct work_struct *work)
2120{
Johan Hedberg32435532011-11-07 22:16:04 +02002121 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002122 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002123
2124 BT_DBG("%s", hdev->name);
2125
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002126 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002127}
2128
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002129static void hci_discov_off(struct work_struct *work)
2130{
2131 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002132
2133 hdev = container_of(work, struct hci_dev, discov_off.work);
2134
2135 BT_DBG("%s", hdev->name);
2136
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002137 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002138}
2139
Johan Hedberg35f74982014-02-18 17:14:32 +02002140void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002141{
Johan Hedberg48210022013-01-27 00:31:28 +02002142 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002143
Johan Hedberg48210022013-01-27 00:31:28 +02002144 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2145 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002146 kfree(uuid);
2147 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002148}
2149
/* Free all stored BR/EDR link keys.
 *
 * Entries are unlinked with list_del_rcu() and released via
 * kfree_rcu(), so concurrent RCU readers such as hci_find_link_key()
 * stay safe; the deferred free also means the iterator's next pointer
 * is still valid after each removal.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2159
Johan Hedberg35f74982014-02-18 17:14:32 +02002160void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002161{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002162 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002163
Johan Hedberg970d0f12014-11-13 14:37:47 +02002164 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2165 list_del_rcu(&k->list);
2166 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002167 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002168}
2169
/* Free all stored SMP identity resolving keys.
 *
 * Entries are unlinked with list_del_rcu() and released via
 * kfree_rcu(), keeping concurrent RCU readers of the IRK list safe.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2179
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002180struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2181{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002182 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002183
Johan Hedberg0378b592014-11-19 15:22:22 +02002184 rcu_read_lock();
2185 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2186 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2187 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002188 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002189 }
2190 }
2191 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002192
2193 return NULL;
2194}
2195
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302196static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002197 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002198{
2199 /* Legacy key */
2200 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302201 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002202
2203 /* Debug keys are insecure so don't store them persistently */
2204 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302205 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002206
2207 /* Changed combination key and there's no previous one */
2208 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302209 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002210
2211 /* Security mode 3 case */
2212 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302213 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002214
Johan Hedberge3befab2014-06-01 16:33:39 +03002215 /* BR/EDR key derived using SC from an LE link */
2216 if (conn->type == LE_LINK)
2217 return true;
2218
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002219 /* Neither local nor remote side had no-bonding as requirement */
2220 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302221 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002222
2223 /* Local side had dedicated bonding as requirement */
2224 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302225 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002226
2227 /* Remote side had dedicated bonding as requirement */
2228 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302229 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002230
2231 /* If none of the above criteria match, then don't store the key
2232 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302233 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002234}
2235
Johan Hedberge804d252014-07-16 11:42:28 +03002236static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002237{
Johan Hedberge804d252014-07-16 11:42:28 +03002238 if (type == SMP_LTK)
2239 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002240
Johan Hedberge804d252014-07-16 11:42:28 +03002241 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002242}
2243
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002244struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2245 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002246{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002247 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002248
Johan Hedberg970d0f12014-11-13 14:37:47 +02002249 rcu_read_lock();
2250 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002251 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2252 continue;
2253
Johan Hedberg923e2412014-12-03 12:43:39 +02002254 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002255 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002256 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002257 }
2258 }
2259 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002260
2261 return NULL;
2262}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002263
/* Resolve a Resolvable Private Address to its stored IRK, if any.
 *
 * Two passes: first look for an IRK whose cached RPA already equals
 * @rpa; failing that, cryptographically test @rpa against every stored
 * IRK via smp_irk_matches() and, on a match, cache @rpa in the entry so
 * the next lookup hits the fast path.
 *
 * Returns the matching IRK or NULL.
 *
 * NOTE(review): the cache update (bacpy into irk->rpa) happens while
 * holding only the RCU read lock -- presumably writers are serialized
 * by hdev->lock at the callers; confirm.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	/* Fast path: cached RPA already matches. */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	/* Slow path: try to resolve @rpa against each stored IRK. */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2287
2288struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2289 u8 addr_type)
2290{
2291 struct smp_irk *irk;
2292
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002293 /* Identity Address must be public or static random */
2294 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2295 return NULL;
2296
Johan Hedbergadae20c2014-11-13 14:37:48 +02002297 rcu_read_lock();
2298 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002299 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002300 bacmp(bdaddr, &irk->bdaddr) == 0) {
2301 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002302 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002303 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002304 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002305 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002306
2307 return NULL;
2308}
2309
/* Add or update a stored BR/EDR link key.
 *
 * If a key for @bdaddr already exists it is updated in place; otherwise
 * a new entry is allocated and linked into hdev->link_keys.
 *
 * When @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()).
 *
 * Returns the (new or updated) key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known". */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2356
Johan Hedbergca9142b2014-02-19 14:57:44 +02002357struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002358 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002359 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002360{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002361 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002362 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002363
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002364 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002365 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002366 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002367 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002368 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002369 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002370 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002371 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002372 }
2373
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002374 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002375 key->bdaddr_type = addr_type;
2376 memcpy(key->val, tk, sizeof(key->val));
2377 key->authenticated = authenticated;
2378 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002379 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002380 key->enc_size = enc_size;
2381 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002382
Johan Hedbergca9142b2014-02-19 14:57:44 +02002383 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002384}
2385
Johan Hedbergca9142b2014-02-19 14:57:44 +02002386struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2387 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002388{
2389 struct smp_irk *irk;
2390
2391 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2392 if (!irk) {
2393 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2394 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002395 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002396
2397 bacpy(&irk->bdaddr, bdaddr);
2398 irk->addr_type = addr_type;
2399
Johan Hedbergadae20c2014-11-13 14:37:48 +02002400 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002401 }
2402
2403 memcpy(irk->val, val, 16);
2404 bacpy(&irk->rpa, rpa);
2405
Johan Hedbergca9142b2014-02-19 14:57:44 +02002406 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002407}
2408
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002409int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2410{
2411 struct link_key *key;
2412
2413 key = hci_find_link_key(hdev, bdaddr);
2414 if (!key)
2415 return -ENOENT;
2416
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002417 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002418
Johan Hedberg0378b592014-11-19 15:22:22 +02002419 list_del_rcu(&key->list);
2420 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002421
2422 return 0;
2423}
2424
/* Remove all LE long term keys matching @bdaddr/@bdaddr_type.
 *
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	/* Deleting during a (non-_safe) RCU iteration works because
	 * list_del_rcu() leaves k's next pointer intact and kfree_rcu()
	 * defers the free until after the grace period.
	 */
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2443
/* Remove all Identity Resolving Keys matching @bdaddr/@addr_type. */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	/* Delete-while-iterating is safe: list_del_rcu() keeps k->next
	 * valid and kfree_rcu() defers the free past the grace period.
	 */
	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2458
Ville Tervo6bd32322011-02-16 16:32:41 +02002459/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002460static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002461{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002462 struct hci_dev *hdev = container_of(work, struct hci_dev,
2463 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002464
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002465 if (hdev->sent_cmd) {
2466 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2467 u16 opcode = __le16_to_cpu(sent->opcode);
2468
2469 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2470 } else {
2471 BT_ERR("%s command tx timeout", hdev->name);
2472 }
2473
Ville Tervo6bd32322011-02-16 16:32:41 +02002474 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002475 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002476}
2477
Szymon Janc2763eda2011-03-22 13:12:22 +01002478struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002479 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002480{
2481 struct oob_data *data;
2482
Johan Hedberg6928a922014-10-26 20:46:09 +01002483 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2484 if (bacmp(bdaddr, &data->bdaddr) != 0)
2485 continue;
2486 if (data->bdaddr_type != bdaddr_type)
2487 continue;
2488 return data;
2489 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002490
2491 return NULL;
2492}
2493
Johan Hedberg6928a922014-10-26 20:46:09 +01002494int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2495 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002496{
2497 struct oob_data *data;
2498
Johan Hedberg6928a922014-10-26 20:46:09 +01002499 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002500 if (!data)
2501 return -ENOENT;
2502
Johan Hedberg6928a922014-10-26 20:46:09 +01002503 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002504
2505 list_del(&data->list);
2506 kfree(data);
2507
2508 return 0;
2509}
2510
Johan Hedberg35f74982014-02-18 17:14:32 +02002511void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002512{
2513 struct oob_data *data, *n;
2514
2515 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2516 list_del(&data->list);
2517 kfree(data);
2518 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002519}
2520
Marcel Holtmann07988722014-01-10 02:07:29 -08002521int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002522 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002523 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002524{
2525 struct oob_data *data;
2526
Johan Hedberg6928a922014-10-26 20:46:09 +01002527 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002528 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002529 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002530 if (!data)
2531 return -ENOMEM;
2532
2533 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002534 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002535 list_add(&data->list, &hdev->remote_oob_data);
2536 }
2537
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002538 if (hash192 && rand192) {
2539 memcpy(data->hash192, hash192, sizeof(data->hash192));
2540 memcpy(data->rand192, rand192, sizeof(data->rand192));
2541 } else {
2542 memset(data->hash192, 0, sizeof(data->hash192));
2543 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08002544 }
2545
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002546 if (hash256 && rand256) {
2547 memcpy(data->hash256, hash256, sizeof(data->hash256));
2548 memcpy(data->rand256, rand256, sizeof(data->rand256));
2549 } else {
2550 memset(data->hash256, 0, sizeof(data->hash256));
2551 memset(data->rand256, 0, sizeof(data->rand256));
2552 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002553
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002554 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002555
2556 return 0;
2557}
2558
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002559struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002560 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002561{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002562 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002563
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002564 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002565 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002566 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002567 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002568
2569 return NULL;
2570}
2571
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002572void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002573{
2574 struct list_head *p, *n;
2575
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002576 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002577 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002578
2579 list_del(p);
2580 kfree(b);
2581 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002582}
2583
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002584int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002585{
2586 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002587
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002588 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002589 return -EBADF;
2590
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002591 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002592 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002593
Johan Hedberg27f70f32014-07-21 10:50:06 +03002594 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002595 if (!entry)
2596 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002597
2598 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002599 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002600
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002601 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002602
2603 return 0;
2604}
2605
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002606int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002607{
2608 struct bdaddr_list *entry;
2609
Johan Hedberg35f74982014-02-18 17:14:32 +02002610 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002611 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002612 return 0;
2613 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002614
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002615 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002616 if (!entry)
2617 return -ENOENT;
2618
2619 list_del(&entry->list);
2620 kfree(entry);
2621
2622 return 0;
2623}
2624
Andre Guedes15819a72014-02-03 13:56:18 -03002625/* This function requires the caller holds hdev->lock */
2626struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2627 bdaddr_t *addr, u8 addr_type)
2628{
2629 struct hci_conn_params *params;
2630
Johan Hedberg738f6182014-07-03 19:33:51 +03002631 /* The conn params list only contains identity addresses */
2632 if (!hci_is_identity_address(addr, addr_type))
2633 return NULL;
2634
Andre Guedes15819a72014-02-03 13:56:18 -03002635 list_for_each_entry(params, &hdev->le_conn_params, list) {
2636 if (bacmp(&params->addr, addr) == 0 &&
2637 params->addr_type == addr_type) {
2638 return params;
2639 }
2640 }
2641
2642 return NULL;
2643}
2644
2645/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002646struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2647 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002648{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002649 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002650
Johan Hedberg738f6182014-07-03 19:33:51 +03002651 /* The list only contains identity addresses */
2652 if (!hci_is_identity_address(addr, addr_type))
2653 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002654
Johan Hedberg501f8822014-07-04 12:37:26 +03002655 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002656 if (bacmp(&param->addr, addr) == 0 &&
2657 param->addr_type == addr_type)
2658 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002659 }
2660
2661 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002662}
2663
2664/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002665struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2666 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002667{
2668 struct hci_conn_params *params;
2669
Johan Hedbergc46245b2014-07-02 17:37:33 +03002670 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002671 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03002672
Andre Guedes15819a72014-02-03 13:56:18 -03002673 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002674 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002675 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002676
2677 params = kzalloc(sizeof(*params), GFP_KERNEL);
2678 if (!params) {
2679 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002680 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002681 }
2682
2683 bacpy(&params->addr, addr);
2684 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002685
2686 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002687 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002688
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002689 params->conn_min_interval = hdev->le_conn_min_interval;
2690 params->conn_max_interval = hdev->le_conn_max_interval;
2691 params->conn_latency = hdev->le_conn_latency;
2692 params->supervision_timeout = hdev->le_supv_timeout;
2693 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2694
2695 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2696
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002697 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002698}
2699
Johan Hedbergf6c63242014-08-15 21:06:59 +03002700static void hci_conn_params_free(struct hci_conn_params *params)
2701{
2702 if (params->conn) {
2703 hci_conn_drop(params->conn);
2704 hci_conn_put(params->conn);
2705 }
2706
2707 list_del(&params->action);
2708 list_del(&params->list);
2709 kfree(params);
2710}
2711
Andre Guedes15819a72014-02-03 13:56:18 -03002712/* This function requires the caller holds hdev->lock */
2713void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2714{
2715 struct hci_conn_params *params;
2716
2717 params = hci_conn_params_lookup(hdev, addr, addr_type);
2718 if (!params)
2719 return;
2720
Johan Hedbergf6c63242014-08-15 21:06:59 +03002721 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002722
Johan Hedberg95305ba2014-07-04 12:37:21 +03002723 hci_update_background_scan(hdev);
2724
Andre Guedes15819a72014-02-03 13:56:18 -03002725 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2726}
2727
2728/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03002729void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002730{
2731 struct hci_conn_params *params, *tmp;
2732
2733 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03002734 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2735 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03002736 list_del(&params->list);
2737 kfree(params);
2738 }
2739
Johan Hedberg55af49a82014-07-02 17:37:26 +03002740 BT_DBG("All LE disabled connection parameters were removed");
2741}
2742
2743/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03002744void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002745{
2746 struct hci_conn_params *params, *tmp;
2747
Johan Hedbergf6c63242014-08-15 21:06:59 +03002748 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2749 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002750
Johan Hedberga2f41a82014-07-04 12:37:19 +03002751 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02002752
Andre Guedes15819a72014-02-03 13:56:18 -03002753 BT_DBG("All LE connection parameters were removed");
2754}
2755
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002756static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002757{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002758 if (status) {
2759 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002760
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002761 hci_dev_lock(hdev);
2762 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2763 hci_dev_unlock(hdev);
2764 return;
2765 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002766}
2767
/* Request-complete callback for the LE scan disable request.
 *
 * @status: HCI status of the scan-disable command (0 on success).
 *
 * For LE-only discovery the discovery state machine is stopped. For
 * interleaved discovery a classic GIAC inquiry is issued next, with
 * inquiry_complete() as its completion handler.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale results before the fresh inquiry. */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2810
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002811static void le_scan_disable_work(struct work_struct *work)
2812{
2813 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002814 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002815 struct hci_request req;
2816 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002817
2818 BT_DBG("%s", hdev->name);
2819
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002820 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002821
Andre Guedesb1efcc22014-02-26 20:21:40 -03002822 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002823
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002824 err = hci_req_run(&req, le_scan_disable_work_complete);
2825 if (err)
2826 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002827}
2828
Johan Hedberga1f4c312014-02-27 14:05:41 +02002829/* Copy the Identity Address of the controller.
2830 *
2831 * If the controller has a public BD_ADDR, then by default use that one.
2832 * If this is a LE only controller without a public address, default to
2833 * the static random address.
2834 *
2835 * For debugging purposes it is possible to force controllers with a
2836 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002837 *
2838 * In case BR/EDR has been disabled on a dual-mode controller and
2839 * userspace has configured a static address, then that address
2840 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002841 */
2842void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2843 u8 *bdaddr_type)
2844{
Marcel Holtmann111902f2014-06-21 04:53:17 +02002845 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002846 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2847 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2848 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002849 bacpy(bdaddr, &hdev->static_addr);
2850 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2851 } else {
2852 bacpy(bdaddr, &hdev->bdaddr);
2853 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2854 }
2855}
2856
/* Alloc HCI device.
 *
 * Allocates and initializes a new hci_dev with default protocol
 * parameters, empty lists/queues and work items.  Returns NULL on
 * allocation failure.  The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff interval defaults; presumably in baseband slots
	 * (0.625 ms) — confirm against the Core Specification.
	 */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults: advertising, scanning, connection and data
	 * length parameters (raw controller units).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Device-wide lists: management, filter, key and LE state */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving RX, TX, command and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2939
2940/* Free HCI device */
2941void hci_free_dev(struct hci_dev *hdev)
2942{
David Herrmann9be0dab2012-04-22 14:39:57 +02002943 /* will free via device release */
2944 put_device(&hdev->dev);
2945}
2946EXPORT_SYMBOL(hci_free_dev);
2947
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, allocates an index,
 * creates the workqueues, sysfs/debugfs entries and rfkill switch,
 * adds the device to the global list and schedules the initial
 * power-on.  Returns the assigned index on success or a negative
 * errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver must provide at least open/close/send */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Dedicated single-threaded queues; WQ_MEM_RECLAIM because
	 * the TX/RX path may be needed during memory reclaim.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	/* NOTE(review): on the err_wqueue path below hdev->debugfs is
	 * not removed here — presumably cleaned up elsewhere; confirm.
	 */
	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort; the device works
	 * without a switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3051
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, shuts it down, notifies management, tears down rfkill,
 * sysfs/debugfs and workqueues, clears all per-device state and
 * finally releases the index and the registration reference.
 * The ordering below is significant.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Block further operations racing with the teardown */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index; hdev may be gone after hci_dev_put() */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal if userspace ever saw the device */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all remaining per-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3120
3121/* Suspend HCI device */
3122int hci_suspend_dev(struct hci_dev *hdev)
3123{
3124 hci_notify(hdev, HCI_DEV_SUSPEND);
3125 return 0;
3126}
3127EXPORT_SYMBOL(hci_suspend_dev);
3128
3129/* Resume HCI device */
3130int hci_resume_dev(struct hci_dev *hdev)
3131{
3132 hci_notify(hdev, HCI_DEV_RESUME);
3133 return 0;
3134}
3135EXPORT_SYMBOL(hci_resume_dev);
3136
Marcel Holtmann75e05692014-11-02 08:15:38 +01003137/* Reset HCI device */
3138int hci_reset_dev(struct hci_dev *hdev)
3139{
3140 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3141 struct sk_buff *skb;
3142
3143 skb = bt_skb_alloc(3, GFP_ATOMIC);
3144 if (!skb)
3145 return -ENOMEM;
3146
3147 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3148 memcpy(skb_put(skb, 3), hw_err, 3);
3149
3150 /* Send Hardware Error to upper stack */
3151 return hci_recv_frame(hdev, skb);
3152}
3153EXPORT_SYMBOL(hci_reset_dev);
3154
Marcel Holtmann76bca882009-11-18 00:40:39 +01003155/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003156int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003157{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003158 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003159 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003160 kfree_skb(skb);
3161 return -ENXIO;
3162 }
3163
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003164 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003165 bt_cb(skb)->incoming = 1;
3166
3167 /* Time stamp */
3168 __net_timestamp(skb);
3169
Marcel Holtmann76bca882009-11-18 00:40:39 +01003170 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003171 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003172
Marcel Holtmann76bca882009-11-18 00:40:39 +01003173 return 0;
3174}
3175EXPORT_SYMBOL(hci_recv_frame);
3176
/* Reassemble a (possibly partial) HCI packet from a driver byte stream.
 *
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  incoming bytes
 * @count: number of bytes available in @data
 * @index: reassembly slot in hdev->reassembly[]
 *
 * Consumes bytes from @data into the per-slot skb until a complete
 * packet is formed, then passes it to hci_recv_frame().  Returns the
 * number of unconsumed bytes, or a negative errno (-EILSEQ for an
 * invalid type/index, -ENOMEM on allocation or oversize failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New packet: size the skb for the worst case of this
		 * type and expect the fixed header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed header is complete, read the payload
		 * length from it and verify it fits the skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3284
/* Slot used for stream (type-byte-prefixed) reassembly */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream from a driver into the reassembler.
 *
 * The stream format is: one packet-type byte followed by the packet
 * itself.  When no packet is in progress, the first byte selects the
 * type; otherwise the type of the in-progress skb is reused.  Returns
 * the number of leftover bytes (>= 0) or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Skip the type byte (void* arithmetic is a
			 * GCC extension treating it as char*).
			 */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past what was consumed and loop for any
		 * further packets in the same buffer.
		 */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3319
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure.
 *
 * Adds @cb to the global callback list under the list write lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3333
/* Unregister an upper-protocol callback structure.
 *
 * Removes @cb from the global callback list under the list write
 * lock.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3345
Marcel Holtmann51086992013-10-10 14:54:19 -07003346static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003348 int err;
3349
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003350 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003352 /* Time stamp */
3353 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003355 /* Send copy to monitor */
3356 hci_send_to_monitor(hdev, skb);
3357
3358 if (atomic_read(&hdev->promisc)) {
3359 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003360 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361 }
3362
3363 /* Get rid of skb owner, prior to sending to the driver. */
3364 skb_orphan(skb);
3365
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003366 err = hdev->send(hdev, skb);
3367 if (err < 0) {
3368 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3369 kfree_skb(skb);
3370 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371}
3372
Marcel Holtmann899de762014-07-11 05:51:58 +02003373bool hci_req_pending(struct hci_dev *hdev)
3374{
3375 return (hdev->req_status == HCI_REQ_PEND);
3376}
3377
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003378/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003379int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3380 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003381{
3382 struct sk_buff *skb;
3383
3384 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3385
3386 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3387 if (!skb) {
3388 BT_ERR("%s no memory for command", hdev->name);
3389 return -ENOMEM;
3390 }
3391
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003392 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003393 * single-command requests.
3394 */
3395 bt_cb(skb)->req.start = true;
3396
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003398 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399
3400 return 0;
3401}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402
3403/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003404void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405{
3406 struct hci_command_hdr *hdr;
3407
3408 if (!hdev->sent_cmd)
3409 return NULL;
3410
3411 hdr = (void *) hdev->sent_cmd->data;
3412
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003413 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 return NULL;
3415
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003416 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417
3418 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3419}
3420
3421/* Send ACL data */
3422static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3423{
3424 struct hci_acl_hdr *hdr;
3425 int len = skb->len;
3426
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003427 skb_push(skb, HCI_ACL_HDR_SIZE);
3428 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003429 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003430 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3431 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432}
3433
/* Queue an ACL frame (and any fragments) for transmission.
 *
 * Adds the ACL header(s) — using the connection handle for BR/EDR or
 * the channel handle for AMP — and appends the skb plus its fragment
 * list to @queue.  Fragments after the first are flagged ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Collapse the skb to its linear part; fragments travel on
	 * frag_list and are queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3495
3496void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3497{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003498 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003499
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003500 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003501
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003502 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003504 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506
/* Send SCO data.
 *
 * Builds the SCO header (handle + payload length), pushes it onto
 * the skb, queues the frame on the connection's data queue and
 * schedules the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Header is assembled on the stack before the push so that
	 * dlen reflects the payload length only.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler.
 *
 * Picks the connection of @type with queued data and the fewest
 * in-flight packets (fairness), and computes its TX quota in *quote
 * from the controller's free buffer count divided by the number of
 * eligible connections (at least 1).  Returns NULL and a quota of 0
 * when no connection is eligible.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip connections of other types or with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a separate LE buffer pool
			 * (le_mtu == 0) share the ACL pool.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3590
/* Link TX timeout handler: the controller stopped acknowledging packets,
 * so forcibly disconnect every connection of the given link type that
 * still has unacked (sent) packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3611
/* Channel scheduler: across all connections of the given link type, pick
 * the channel whose head packet has the highest priority; among channels
 * at that priority, prefer the one on the connection with the fewest
 * in-flight packets. Computes a fair TX quota in *quote.
 *
 * Returns the chosen channel or NULL when nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters: lower
			 * priority channels lose to the current best */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority: restart the
			 * least-sent competition in this new bucket */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break within a priority bucket by the
			 * connection with the fewest unacked packets */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the controller buffer budget matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares the ACL pool when no dedicated LE buffers exist */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the budget among the contenders; never hand out less
	 * than one packet so the winner can always make progress */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3693
/* Anti-starvation pass run after a scheduling round: for every channel of
 * the given link type that sent nothing this round (chan->sent == 0) but
 * still has queued data, promote its head packet to HCI_PRIO_MAX - 1 so it
 * wins the next hci_chan_sent() selection. Channels that did send have
 * their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just reset its
			 * round counter, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3743
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003744static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3745{
3746 /* Calculate count of blocks used by this packet */
3747 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3748}
3749
/* Detect a stalled ACL link: if the controller has no free ACL buffers
 * (cnt == 0) and nothing has been acked since the last TX for longer than
 * HCI_ACL_TX_TIMEOUT, tear down the stalled connections. Skipped entirely
 * for unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760
/* ACL scheduler for packet-based flow control: repeatedly pick the best
 * channel and drain up to its quota of packets while the controller still
 * has free ACL buffers. Runs the starvation-avoidance pass afterwards if
 * anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Remember the priority the channel was selected at */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; account the packet
			 * on both the channel and its connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: rebalance starving channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3798
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003799static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003800{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003801 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003802 struct hci_chan *chan;
3803 struct sk_buff *skb;
3804 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003805 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003806
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003807 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003808
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003809 BT_DBG("%s", hdev->name);
3810
3811 if (hdev->dev_type == HCI_AMP)
3812 type = AMP_LINK;
3813 else
3814 type = ACL_LINK;
3815
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003816 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003817 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003818 u32 priority = (skb_peek(&chan->data_q))->priority;
3819 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3820 int blocks;
3821
3822 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003823 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003824
3825 /* Stop if priority has changed */
3826 if (skb->priority < priority)
3827 break;
3828
3829 skb = skb_dequeue(&chan->data_q);
3830
3831 blocks = __get_blocks(hdev, skb);
3832 if (blocks > hdev->block_cnt)
3833 return;
3834
3835 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003836 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003837
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003838 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003839 hdev->acl_last_tx = jiffies;
3840
3841 hdev->block_cnt -= blocks;
3842 quote -= blocks;
3843
3844 chan->sent += blocks;
3845 chan->conn->sent += blocks;
3846 }
3847 }
3848
3849 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003850 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003851}
3852
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003853static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003854{
3855 BT_DBG("%s", hdev->name);
3856
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003857 /* No ACL link over BR/EDR controller */
3858 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3859 return;
3860
3861 /* No AMP link over AMP controller */
3862 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003863 return;
3864
3865 switch (hdev->flow_ctl_mode) {
3866 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3867 hci_sched_acl_pkt(hdev);
3868 break;
3869
3870 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3871 hci_sched_acl_blk(hdev);
3872 break;
3873 }
3874}
3875
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003877static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878{
3879 struct hci_conn *conn;
3880 struct sk_buff *skb;
3881 int quote;
3882
3883 BT_DBG("%s", hdev->name);
3884
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003885 if (!hci_conn_num(hdev, SCO_LINK))
3886 return;
3887
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3889 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3890 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003891 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892
3893 conn->sent++;
3894 if (conn->sent == ~0)
3895 conn->sent = 0;
3896 }
3897 }
3898}
3899
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003900static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003901{
3902 struct hci_conn *conn;
3903 struct sk_buff *skb;
3904 int quote;
3905
3906 BT_DBG("%s", hdev->name);
3907
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003908 if (!hci_conn_num(hdev, ESCO_LINK))
3909 return;
3910
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003911 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3912 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003913 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3914 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003915 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003916
3917 conn->sent++;
3918 if (conn->sent == ~0)
3919 conn->sent = 0;
3920 }
3921 }
3922}
3923
/* LE scheduler: drain queued LE data using either the dedicated LE buffer
 * pool or, when the controller reports none (le_pkts == 0), the shared
 * ACL pool. Also checks for a stalled LE link before sending.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Budget comes from the LE pool if it exists, else the ACL pool;
	 * tmp keeps the starting value to detect whether anything went out */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Remember the priority the channel was selected at */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: rebalance starving channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3974
/* TX work item: run each per-link-type scheduler, then flush any raw
 * (unknown type) packets straight to the driver. Scheduling is skipped
 * while the device is claimed by a user channel, which bypasses the
 * kernel's flow control.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3995
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003996/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997
3998/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003999static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000{
4001 struct hci_acl_hdr *hdr = (void *) skb->data;
4002 struct hci_conn *conn;
4003 __u16 handle, flags;
4004
4005 skb_pull(skb, HCI_ACL_HDR_SIZE);
4006
4007 handle = __le16_to_cpu(hdr->handle);
4008 flags = hci_flags(handle);
4009 handle = hci_handle(handle);
4010
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004011 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004012 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013
4014 hdev->stat.acl_rx++;
4015
4016 hci_dev_lock(hdev);
4017 conn = hci_conn_hash_lookup_handle(hdev, handle);
4018 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004019
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004021 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004022
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004024 l2cap_recv_acldata(conn, skb, flags);
4025 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004027 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004028 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029 }
4030
4031 kfree_skb(skb);
4032}
4033
4034/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004035static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036{
4037 struct hci_sco_hdr *hdr = (void *) skb->data;
4038 struct hci_conn *conn;
4039 __u16 handle;
4040
4041 skb_pull(skb, HCI_SCO_HDR_SIZE);
4042
4043 handle = __le16_to_cpu(hdr->handle);
4044
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004045 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046
4047 hdev->stat.sco_rx++;
4048
4049 hci_dev_lock(hdev);
4050 conn = hci_conn_hash_lookup_handle(hdev, handle);
4051 hci_dev_unlock(hdev);
4052
4053 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004055 sco_recv_scodata(conn, skb);
4056 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004058 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004059 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 }
4061
4062 kfree_skb(skb);
4063}
4064
Johan Hedberg9238f362013-03-05 20:37:48 +02004065static bool hci_req_is_complete(struct hci_dev *hdev)
4066{
4067 struct sk_buff *skb;
4068
4069 skb = skb_peek(&hdev->cmd_q);
4070 if (!skb)
4071 return true;
4072
4073 return bt_cb(skb)->req.start;
4074}
4075
/* Re-queue the last sent command at the head of the command queue so it
 * gets transmitted again. Used when a controller spontaneously resets and
 * the pending command would otherwise never complete. HCI_Reset itself is
 * never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone so hdev->sent_cmd keeps its own reference */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4097
/* Handle completion of a command that was part of an HCI request.
 *
 * @opcode: opcode of the command that completed
 * @status: HCI status of the completion (0 on success)
 *
 * Finds the request's completion callback — either on the in-flight
 * command (hdev->sent_cmd) or on a queued command — and invokes it once
 * the request is finished. On failure, all remaining queued commands
 * belonging to the same request are flushed.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A new request starts here: put its first command back
		 * and stop flushing */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4163
/* RX work item: drain the receive queue, mirroring each packet to the
 * monitor (and, in promiscuous mode, to HCI sockets), then dispatch it to
 * the matching packet handler. Data packets are dropped while the device
 * is in user-channel mode or still initializing.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* User channel owns the device: the kernel stack must not
		 * process any of its traffic */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4218
/* Command work item: if the controller has command credits, send the next
 * queued HCI command, keep a clone in hdev->sent_cmd for completion
 * matching, and arm the command timeout (except for HCI_Reset, whose
 * timer is cancelled instead). On clone failure the command is requeued
 * and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous in-flight command's clone */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}