/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
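
/* A synchronous request starts out as HCI_REQ_PEND and ends up as either
 * HCI_REQ_DONE (the completion event arrived) or HCI_REQ_CANCELED (the
 * request was aborted, e.g. on device shutdown). Such requests are
 * serialized per device through req_lock via hci_req_lock()/hci_req_unlock().
 */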

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
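
/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 * writing "Y" to /sys/kernel/debug/bluetooth/hciX/dut_mode sends
 * HCI_OP_ENABLE_DUT_MODE to the controller, while writing "N" issues
 * HCI_OP_RESET to leave Device Under Test mode.
 */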

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        hci_req_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};
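
/* set_diag is a driver-provided callback; the vendor_diag entry is only
 * created when the driver implements it (see __hci_init below), so the
 * write handler above can call it unconditionally.
 */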

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
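
/* Usage sketch: issuing a command synchronously and consuming the
 * Command Complete parameters. The opcode and parameter struct are
 * illustrative only, not a real vendor API:
 *
 *      skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... parse skb->data ...
 *      kfree_skb(skb);
 */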

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
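
/* Usage sketch: running one of the request builders below synchronously,
 * e.g. enabling page and inquiry scan (hypothetical caller):
 *
 *      __u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *      err = hci_req_sync(hdev, hci_scan_req, scan, HCI_INIT_TIMEOUT);
 */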

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                /* The Device Under Test (DUT) mode is special and available
                 * for all controller types. So just create it early on.
                 */
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);

                /* When the driver supports the set_diag callback, then
                 * expose an entry to modify the vendor diagnostic setting.
                 */
                if (hdev->set_diag)
                        debugfs_create_file("vendor_diag", 0644, hdev->debugfs,
                                            hdev, &vendor_diag_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
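
/* Usage sketch: every successful hci_dev_get() must be balanced with a
 * hci_dev_put() once the caller is done with the device:
 *
 *      hdev = hci_dev_get(index);
 *      if (!hdev)
 *              return -ENODEV;
 *      ...
 *      hci_dev_put(hdev);
 */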

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

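        /* Keep the resolve list sorted by ascending |RSSI| (strongest
         * signal first) so that name resolution is attempted for the
         * closest devices first. Entries whose resolution is already in
         * progress (NAME_PENDING) are never displaced.
         */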
        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}
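
/* The MGMT_DEV_FOUND_* flags returned above are meant to be passed on to
 * mgmt_device_found() when the discovery result is reported to userspace.
 */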

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1344 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001345 if (err < 0)
1346 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001347
1348 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1349 * cleared). If it is interrupted by a signal, return -EINTR.
1350 */
NeilBrown74316202014-07-07 15:16:04 +10001351 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001352 TASK_INTERRUPTIBLE))
1353 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001354 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001356 /* for unlimited number of responses we will use buffer with
1357 * 255 entries
1358 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1360
1361 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1362 * copy it to the user space.
1363 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001364 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001365 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 err = -ENOMEM;
1367 goto done;
1368 }
1369
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001370 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001372 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
1374 BT_DBG("num_rsp %d", ir.num_rsp);
1375
1376 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1377 ptr += sizeof(ir);
1378 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001379 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001381 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 err = -EFAULT;
1383
1384 kfree(buf);
1385
1386done:
1387 hci_dev_put(hdev);
1388 return err;
1389}
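
/* Example (illustrative sketch, not part of the kernel build): a minimal
 * userspace invocation of the HCIINQUIRY ioctl handled above, in the style
 * of the BlueZ library. The kernel expects a struct hci_inquiry_req header
 * immediately followed by room for ir.num_rsp inquiry_info entries; the
 * userspace type and macro names are assumed to come from the BlueZ
 * <bluetooth/hci.h> header.
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.ir.dev_id  = 0;			// hci0
 *	req.ir.flags   = IREQ_CACHE_FLUSH;	// force a fresh inquiry
 *	req.ir.lap[0]  = 0x33;			// GIAC 0x9e8b33
 *	req.ir.lap[1]  = 0x8b;
 *	req.ir.lap[2]  = 0x9e;
 *	req.ir.length  = 8;			// 8 * 1.28 seconds
 *	req.ir.num_rsp = 8;
 *
 *	if (ioctl(dd, HCIINQUIRY, &req) < 0)
 *		perror("HCIINQUIRY");
 *	// on success, req.ir.num_rsp holds the number of info[] entries
 */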

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
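
/* Example (illustrative sketch, not part of the kernel build): powering a
 * controller on from userspace through the legacy ioctl path above, the
 * way hciconfig traditionally does it. The device index is passed directly
 * as the ioctl argument.
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");	// bring up hci0
 */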

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
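
/* Example (illustrative sketch): the matching power-off request for the
 * ioctl path above. Closing an already-down device is a no-op that
 * returns 0, so no EALREADY handling is needed here.
 *
 *	if (ioctl(ctl, HCIDEVDOWN, 0) < 0)
 *		perror("HCIDEVDOWN");	// power off hci0
 */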

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
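
/* Example (illustrative sketch): the HCISETSCAN branch above is what a
 * legacy "hciconfig hci0 piscan" boils down to. dev_opt carries the scan
 * enable bits, and hci_update_scan_state() then keeps the mgmt
 * connectable/discoverable flags in sync with this non-mgmt change.
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;				// hci0
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	// connectable + discoverable
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */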

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
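
/* Example (illustrative sketch): enumerating controllers with the two
 * helpers above. HCIGETDEVLIST takes a counted hci_dev_req array that
 * follows the hci_dev_list_req header, and HCIGETDEVINFO fills in one
 * hci_dev_info; HCI_MAX_DEV is assumed from the userspace headers.
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	struct hci_dev_info di;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0) {
 *		for (i = 0; i < dl->dev_num; i++) {
 *			memset(&di, 0, sizeof(di));
 *			di.dev_id = dr[i].dev_id;
 *			if (ioctl(ctl, HCIGETDEVINFO, &di) == 0)
 *				printf("%s (up: %d)\n", di.name,
 *				       !!(di.flags & (1 << HCI_UP)));
 *		}
 *	}
 *	free(dl);
 */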

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}
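
/* A worked example for the decision chain above, using values from the
 * HCI specification: an unauthenticated combination key (type 0x04)
 * created while both sides requested general bonding (auth_type 0x04)
 * passes the "neither side had no-bonding" check and is stored. The same
 * key type with both auth_type and remote_auth at 0x00/0x01 (no bonding)
 * fails every check and stays non-persistent. A debug combination key
 * (type 0x03) is never stored, while any legacy key (type < 0x03)
 * always is.
 */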
2328
Johan Hedberge804d252014-07-16 11:42:28 +03002329static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002330{
Johan Hedberge804d252014-07-16 11:42:28 +03002331 if (type == SMP_LTK)
2332 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002333
Johan Hedberge804d252014-07-16 11:42:28 +03002334 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002335}
2336
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002337struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2338 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002339{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002340 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002341
Johan Hedberg970d0f12014-11-13 14:37:47 +02002342 rcu_read_lock();
2343 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002344 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2345 continue;
2346
Johan Hedberg923e2412014-12-03 12:43:39 +02002347 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002348 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002349 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002350 }
2351 }
2352 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002353
2354 return NULL;
2355}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002356
Johan Hedberg970c4e42014-02-18 10:19:33 +02002357struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2358{
2359 struct smp_irk *irk;
2360
Johan Hedbergadae20c2014-11-13 14:37:48 +02002361 rcu_read_lock();
2362 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2363 if (!bacmp(&irk->rpa, rpa)) {
2364 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002365 return irk;
2366 }
2367 }
2368
Johan Hedbergadae20c2014-11-13 14:37:48 +02002369 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2370 if (smp_irk_matches(hdev, irk->val, rpa)) {
2371 bacpy(&irk->rpa, rpa);
2372 rcu_read_unlock();
2373 return irk;
2374 }
2375 }
2376 rcu_read_unlock();
2377
Johan Hedberg970c4e42014-02-18 10:19:33 +02002378 return NULL;
2379}
2380
2381struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2382 u8 addr_type)
2383{
2384 struct smp_irk *irk;
2385
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002386 /* Identity Address must be public or static random */
2387 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2388 return NULL;
2389
Johan Hedbergadae20c2014-11-13 14:37:48 +02002390 rcu_read_lock();
2391 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002392 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002393 bacmp(bdaddr, &irk->bdaddr) == 0) {
2394 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002395 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002396 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002397 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002398 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002399
2400 return NULL;
2401}
2402
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002403struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002404 bdaddr_t *bdaddr, u8 *val, u8 type,
2405 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002406{
2407 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302408 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002409
2410 old_key = hci_find_link_key(hdev, bdaddr);
2411 if (old_key) {
2412 old_key_type = old_key->type;
2413 key = old_key;
2414 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002415 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002416 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002417 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002418 return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

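/* A minimal usage sketch (not called by the core): how an SMP layer
 * might cache a freshly distributed, unauthenticated LTK for a peer
 * with a public address. The SMP_LTK type constant is assumed to come
 * from smp.h; zero EDIV/Rand values here are purely illustrative.
 */
static inline struct smp_ltk *example_store_ltk(struct hci_dev *hdev,
						bdaddr_t *bdaddr, u8 tk[16])
{
	/* Caller is expected to hold hdev->lock, as with the other
	 * key-store helpers in this file.
	 */
	return hci_add_ltk(hdev, bdaddr, ADDR_LE_DEV_PUBLIC, SMP_LTK,
			   0x00, tk, 16, 0, 0);
}
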
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

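/* A minimal usage sketch: caching a peer's IRK when the key
 * distribution did not include a resolvable private address. Passing
 * BDADDR_ANY as the RPA is an assumption mirroring how smp.c drives
 * this helper.
 */
static inline struct smp_irk *example_store_irk(struct hci_dev *hdev,
						bdaddr_t *bdaddr,
						u8 irk_val[16])
{
	return hci_add_irk(hdev, bdaddr, ADDR_LE_DEV_PUBLIC, irk_val,
			   BDADDR_ANY);
}
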
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

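/* A minimal usage sketch: checking for an existing bond with an LE
 * peer (public address) before, say, re-initiating pairing. Thanks to
 * the IRK lookup above this also matches peers currently known only by
 * a resolvable private address.
 */
static inline bool example_le_peer_is_bonded(struct hci_dev *hdev,
					     bdaddr_t *bdaddr)
{
	return hci_bdaddr_is_paired(hdev, bdaddr, BDADDR_LE_PUBLIC);
}
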
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

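/* The data->present logic above reduces to a bitmask of which OOB
 * blocks are valid: 0x01 for P-192 only, 0x02 for P-256 only, 0x03 for
 * both. A minimal usage sketch, assuming the P-256 hash/randomizer
 * were received over some out-of-band channel:
 */
static inline int example_store_p256_oob(struct hci_dev *hdev,
					 bdaddr_t *bdaddr,
					 u8 hash256[16], u8 rand256[16])
{
	/* NULL P-192 pointers clear that half, leaving present == 0x02 */
	return hci_add_remote_oob_data(hdev, bdaddr, BDADDR_BREDR,
				       NULL, NULL, hash256, rand256);
}
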
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for instance %d", hdev->name, instance);

	return 0;
}

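/* A minimal usage sketch: registering advertising instance 1 with a
 * flags-only AD payload, no scan response, no timeout and the default
 * duration. The payload bytes are hypothetical; the real callers are
 * the mgmt Add Advertising handlers. Caller must hold hdev->lock.
 */
static inline int example_add_adv_instance(struct hci_dev *hdev)
{
	/* AD element: len 0x02, type 0x01 (Flags), LE General
	 * Discoverable | BR/EDR Not Supported
	 */
	u8 adv_data[] = { 0x02, 0x01, 0x06 };

	return hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data),
				    adv_data, 0, NULL, 0, 0);
}
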
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

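/* A minimal usage sketch: these helpers back all of the per-device
 * address lists (blacklist, whitelist, le_white_list). Adding a BR/EDR
 * peer to the connectable whitelist under hdev->lock might look like
 * this; error handling is left to the caller.
 */
static inline int example_whitelist_peer(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	return hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
}
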
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
						    bdaddr_t *addr,
						    u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, &hdev->pend_le_conns, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	list_for_each_entry(param, &hdev->pend_le_reports, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

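/* A minimal usage sketch: looking up (or creating) the connection
 * parameters for one LE peer and pinning a tighter connection interval
 * than the hdev-wide defaults copied in above. The interval values are
 * hypothetical. Caller must hold hdev->lock.
 */
static inline void example_tune_conn_interval(struct hci_dev *hdev,
					      bdaddr_t *addr)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
	if (!params)
		return;

	params->conn_min_interval = 0x0010;	/* 16 * 1.25 ms = 20 ms */
	params->conn_max_interval = 0x0020;	/* 32 * 1.25 ms = 40 ms */
}
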
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a
		 * disabled device, leave the params, but mark them as
		 * just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR
			 * inquiry simultaneously, and BR/EDR inquiry is
			 * already finished, stop discovery; otherwise BR/EDR
			 * inquiry will stop discovery when finished. If we
			 * are resolving a remote device name, do not change
			 * the discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}

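/* Worked example of the requeue arithmetic above, with illustrative
 * numbers: duration == 1000 jiffies and scan_start == ULONG_MAX - 99.
 * If the jiffies counter has since wrapped to now == 100, the unsigned
 * subtraction now - scan_start still evaluates to a small value, so
 * the first branch is taken; now < scan_start selects the wrap-aware
 * case, elapsed == ULONG_MAX - scan_start + now == 199, and the
 * disable work is queued after the remaining 801 jiffies instead of
 * firing immediately.
 */
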
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

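/* A minimal usage sketch: logging whichever address the rules above
 * select as the identity address for this controller.
 */
static inline void example_log_identity_address(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}
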
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Time stamp */
	__net_timestamp(skb);

	/* Mark as diagnostic packet and send to monitor */
	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
	hci_send_to_monitor(hdev, skb);

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

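/* A minimal usage sketch: how an upper protocol hooks into the HCI
 * core. Only ->name is shown; real users such as L2CAP also fill in
 * the connection callbacks of struct hci_cb, which are omitted here as
 * assumptions about the full structure layout.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static inline int example_register_cb(void)
{
	return hci_register_cb(&example_cb);
}
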
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);

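/* A minimal usage sketch: issuing a blocking command from driver setup
 * code and discarding the returned event skb. HCI_OP_READ_LOCAL_VERSION
 * and HCI_CMD_TIMEOUT are assumed to come from hci.h; any opcode and
 * timeout pair works the same way.
 */
static inline int example_cmd_sync(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);
	return 0;
}
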
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688/* Send ACL data */
3689static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3690{
3691 struct hci_acl_hdr *hdr;
3692 int len = skb->len;
3693
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003694 skb_push(skb, HCI_ACL_HDR_SIZE);
3695 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003696 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003697 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3698 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699}
3700
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

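/* Entry point for upper layers to transmit ACL data on a channel. A
 * hypothetical caller would look roughly like this (illustrative only;
 * real callers such as L2CAP build their own headers first):
 *
 *	skb->priority = HCI_PRIO_MAX - 1;
 *	hci_send_acl(chan, skb, ACL_START);
 *
 * The skb only gets queued on chan->data_q here; the actual send
 * happens asynchronously from hci_tx_work().
 */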
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
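/* Unlike ACL, SCO frames are never fragmented here: the payload must
 * already fit the single-byte length field of the SCO header.
 */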
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
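/* hci_low_sent() picks, among connections of the given type that have
 * queued data, the one with the fewest frames in flight (c->sent) and
 * derives a fair per-round quota from the free controller buffers.
 * For example, with hdev->acl_cnt == 8 and 3 ready ACL connections the
 * quote works out to 8 / 3 == 2 frames, with a floor of 1.
 */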
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

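/* Watchdog for the schedulers: if no buffer credits come back for
 * longer than the link supervision timeout, every connection of the
 * given type with unacked frames is forcibly disconnected.
 */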
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

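/* Priority-aware variant of the connection scheduler: scan all
 * channels of all matching connections, consider only those whose head
 * skb carries the highest priority seen so far, and among them pick
 * the connection with the fewest frames in flight. The quota
 * calculation mirrors hci_low_sent().
 */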
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

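/* Starvation avoidance, run after a scheduling round that sent data:
 * channels that transmitted get their round counter (chan->sent)
 * cleared, while channels that sent nothing have the head of their
 * queue promoted to HCI_PRIO_MAX - 1 so they win the next priority
 * comparison in hci_chan_sent().
 */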
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

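/* Block-based flow control (used by AMP controllers) accounts for
 * buffer usage in fixed-size blocks rather than whole packets. For
 * example, with hdev->block_len == 16 a frame carrying 40 bytes of ACL
 * payload costs DIV_ROUND_UP(40, 16) == 3 blocks.
 */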
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

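/* Packet-based ACL scheduler: drain up to 'quote' frames per round
 * from the channel chosen by hci_chan_sent(), stopping early if the
 * head of the queue drops below the priority the round started with,
 * then rebalance channel priorities.
 */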
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

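/* Block-based ACL scheduler. The structure mirrors the packet-based
 * variant above, but credits are spent per block and a frame is only
 * sent while enough free blocks remain to cover it in full.
 */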
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

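/* Select which scheduler services ACL data: on an AMP controller the
 * data-bearing links are AMP_LINK, and the flow control mode reported
 * by the controller decides between packet- and block-based
 * accounting.
 */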
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
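/* SCO and eSCO have no per-channel priority queues; hci_low_sent()
 * simply round-robins the least busy connection, and conn->sent is
 * wrapped back to zero once it reaches ~0.
 */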
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

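/* LE scheduler. Controllers without a dedicated LE buffer pool report
 * le_pkts == 0, in which case LE traffic borrows credits from (and
 * returns them to) the ACL count, hence the le_pkts checks around
 * cnt below.
 */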
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

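/* TX work: runs from the hdev workqueue whenever new data is queued or
 * the controller returns buffer credits. In HCI_USER_CHANNEL mode the
 * schedulers are bypassed and only raw packets are flushed.
 */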
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
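/* The 16-bit handle field of an incoming ACL header carries both the
 * 12-bit connection handle and 4 flag bits (packet boundary and
 * broadcast flags, per the core specification); hci_handle() and
 * hci_flags() split them apart again, mirroring hci_handle_pack() on
 * the TX side.
 */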
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

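/* HCI requests are batches of commands queued back to back on cmd_q,
 * with the first command of each batch marked via req.start. The
 * request in flight is therefore complete once the queue is empty or
 * the next queued command starts a new request.
 */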
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

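/* Called from event processing when a command completes. For a
 * finished request this hands back the legacy and/or skb-based
 * complete callback through the two out parameters instead of calling
 * them directly; invoking them is left to the caller.
 */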
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

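/* RX work: drains hdev->rx_q. Every packet is mirrored to the monitor
 * interface (and, in promiscuous mode, to the sockets) before normal
 * processing, and data packets are dropped while the device is still
 * initializing.
 */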
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

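/* CMD work: sends one queued command whenever the controller has a
 * free command credit (cmd_cnt). The skb is cloned into hdev->sent_cmd
 * so completion events can be matched against it, and cmd_timer
 * catches controllers that never answer.
 */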
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}