/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "led.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
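
/* A synchronous HCI request marks hdev->req_status as HCI_REQ_PEND,
 * queues its commands and sleeps on hdev->req_wait_q. The completion
 * handler (or a cancellation) moves the status to HCI_REQ_DONE or
 * HCI_REQ_CANCELED and wakes the waiter, while the req_lock mutex
 * serializes whole requests against each other.
 */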

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
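
/* Example (illustrative only, assuming debugfs is mounted at the usual
 * /sys/kernel/debug): Device Under Test mode can be toggled and
 * inspected from userspace with
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writes go through strtobool(), so "Y", "y" and "1" enable, while
 * "N", "n" and "0" disable.
 */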

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
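
/* Illustrative use of the helper above (a sketch, not code from this
 * file): a driver running in sleepable context could issue a single
 * command and consume the returned event like this:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data as struct hci_rp_read_local_version ...
 *
 *	kfree_skb(skb);
 */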

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
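
/* The *_req() helpers below are request builders: they only append
 * commands to a struct hci_request and are meant to be passed as the
 * callback argument of __hci_req_sync() or hci_req_sync() above.
 */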

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
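
/* Note on the checks above and below: hdev->commands[] caches the
 * Read Local Supported Commands bit mask reported by the controller,
 * so a test like "commands[14] & 0x20" probes one spec-defined bit
 * (here octet 14, bit 5) before issuing the corresponding command.
 */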

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
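
/* The connection accept timeout written above is expressed in baseband
 * slots of 0.625 ms, so 0x7d00 = 32000 slots = 20000 ms, matching the
 * "~20 secs" note.
 */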

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
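
/* How the bits above map to events: the Set Event Mask parameter is a
 * 64-bit little-endian mask where the event with code N occupies bit
 * N - 1, i.e. bit (N - 1) % 8 of events[(N - 1) / 8]. For example,
 * Inquiry Result with RSSI (event 0x22) lands on bit 33, hence the
 * "events[4] |= 0x02" above.
 */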

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list
		 * max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force max_page
		 * to a minimum of 1.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it
	 * is marked as supported. If not supported, assume that the
	 * controller does not have actual support for stored link keys
	 * which makes this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
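
/* Illustrative usage (a sketch): the reference taken by hci_dev_get()
 * must be balanced with hci_dev_put() once the caller is done:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */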

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
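
/* The state machine above reports to userspace only on the visible
 * edges: mgmt_discovering(hdev, 1) when entering FINDING and
 * mgmt_discovering(hdev, 0) when falling back to STOPPED, unless the
 * stop happens straight out of STARTING (discovery never actually
 * began, so there is nothing to report).
 */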

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
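
/* The walk above keeps cache->resolve ordered by ascending |RSSI|, so
 * entries with the strongest signal are name-resolved first; entries
 * already in NAME_PENDING state keep their position at the head.
 */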

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
Johan Hedberg42c6b122013-03-05 20:37:49 +02001289static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290{
1291 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001292 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 struct hci_cp_inquiry cp;
1294
1295 BT_DBG("%s", hdev->name);
1296
1297 if (test_bit(HCI_INQUIRY, &hdev->flags))
1298 return;
1299
1300 /* Start Inquiry */
1301 memcpy(&cp.lap, &ir->lap, 3);
1302 cp.length = ir->length;
1303 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001304 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305}
1306
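/* HCIINQUIRY ioctl handler: copy the request from user space, run a
 * synchronous inquiry if the cache is stale (or IREQ_CACHE_FLUSH was
 * requested), then copy the cached results back to the caller.
 */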
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep. Therefore allocate a temporary
	 * buffer and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
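
/* A minimal user-space sketch of this ioctl (assuming the usual BlueZ
 * headers and a raw HCI socket dd from socket(AF_BLUETOOTH, SOCK_RAW,
 * BTPROTO_HCI)). The buffer layout is the request header followed by up
 * to num_rsp inquiry_info records; ir.length is in inquiry-length units,
 * for which this handler budgets 2 seconds each:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },	// GIAC
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	if (ioctl(dd, HCIINQUIRY, &req) < 0)
 *		perror("HCIINQUIRY");
 */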

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
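
/* Driver-side sketch (hypothetical foo_setup, not from this file): a
 * transport whose default address cannot be trusted flags that in its
 * setup callback, which makes the logic above bring the controller up
 * as unconfigured until a valid address is programmed:
 *
 *	static int foo_setup(struct hci_dev *hdev)
 *	{
 *		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 *		return 0;
 *	}
 */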

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

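/* Keep the mgmt connectable/discoverable settings in sync with a raw
 * Write Scan Enable issued through the ioctl path: SCAN_PAGE maps to
 * HCI_CONNECTABLE and SCAN_INQUIRY to HCI_DISCOVERABLE.
 */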
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
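
/* User-space sketch (assuming a raw HCI socket dd as before): enabling
 * page and inquiry scan via the legacy ioctl path, which then runs
 * hci_update_scan_state() above on success:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, &dr) < 0)
 *		perror("HCISETSCAN");
 */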

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
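
/* User-space sketch (same assumptions): enumerating controllers with
 * HCIGETDEVLIST. dev_num is read by the kernel first, so it must be set
 * before the call:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *
 *	dl = malloc(HCI_MAX_DEV * sizeof(*dr) + sizeof(*dl));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, (void *) dl) == 0)
 *		for (int i = 0; i < dl->dev_num; i++)
 *			printf("hci%u is %s\n", dr[i].dev_id,
 *			       (dr[i].dev_opt & (1 << HCI_UP)) ?
 *			       "up" : "down");
 *	free(dl);
 */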

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Now that the controller is configured, it is
		 * important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}
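
/* Worked examples of the checks above: a legacy Combination Key (0x00)
 * is always stored; a Debug Combination key never is; an Unauthenticated
 * Combination key from a pairing where both sides used no-bonding
 * (auth_type and remote_auth both 0x00 or 0x01) on a BR/EDR link falls
 * through every check and is dropped when the connection ends.
 */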

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

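/* Resolve a Resolvable Private Address to a stored IRK. The lookup is
 * two-pass: first a cheap match against the RPA cached in each IRK
 * entry, and only then the AES-based smp_irk_matches() check, caching
 * the RPA on a hit so the next lookup takes the fast path.
 */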
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

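/* Store (or update) a BR/EDR link key. Returns the key entry, or NULL on
 * allocation failure. If @persistent is non-NULL it is set to whether
 * the key should survive the connection, as decided by
 * hci_persistent_key().
 */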
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002556 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002557 }
2558
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002559 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002560}
2561
Johan Hedberga7ec7332014-02-18 17:14:35 +02002562void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2563{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002564 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002565
Johan Hedbergadae20c2014-11-13 14:37:48 +02002566 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002567 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2568 continue;
2569
2570 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2571
Johan Hedbergadae20c2014-11-13 14:37:48 +02002572 list_del_rcu(&k->list);
2573 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002574 }
2575}
2576
Johan Hedberg55e76b32015-03-10 22:34:40 +02002577bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2578{
2579 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002580 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002581 u8 addr_type;
2582
2583 if (type == BDADDR_BREDR) {
2584 if (hci_find_link_key(hdev, bdaddr))
2585 return true;
2586 return false;
2587 }
2588
2589 /* Convert to HCI addr type which struct smp_ltk uses */
2590 if (type == BDADDR_LE_PUBLIC)
2591 addr_type = ADDR_LE_DEV_PUBLIC;
2592 else
2593 addr_type = ADDR_LE_DEV_RANDOM;
2594
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002595 irk = hci_get_irk(hdev, bdaddr, addr_type);
2596 if (irk) {
2597 bdaddr = &irk->bdaddr;
2598 addr_type = irk->addr_type;
2599 }
2600
Johan Hedberg55e76b32015-03-10 22:34:40 +02002601 rcu_read_lock();
2602 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002603 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2604 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002605 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002606 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002607 }
2608 rcu_read_unlock();
2609
2610 return false;
2611}
2612
Ville Tervo6bd32322011-02-16 16:32:41 +02002613/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002614static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002615{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002616 struct hci_dev *hdev = container_of(work, struct hci_dev,
2617 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002618
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002619 if (hdev->sent_cmd) {
2620 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2621 u16 opcode = __le16_to_cpu(sent->opcode);
2622
2623 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2624 } else {
2625 BT_ERR("%s command tx timeout", hdev->name);
2626 }
2627
Ville Tervo6bd32322011-02-16 16:32:41 +02002628 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002629 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002630}
2631
Szymon Janc2763eda2011-03-22 13:12:22 +01002632struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002633 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002634{
2635 struct oob_data *data;
2636
Johan Hedberg6928a922014-10-26 20:46:09 +01002637 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2638 if (bacmp(bdaddr, &data->bdaddr) != 0)
2639 continue;
2640 if (data->bdaddr_type != bdaddr_type)
2641 continue;
2642 return data;
2643 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002644
2645 return NULL;
2646}
2647
Johan Hedberg6928a922014-10-26 20:46:09 +01002648int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2649 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002650{
2651 struct oob_data *data;
2652
Johan Hedberg6928a922014-10-26 20:46:09 +01002653 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002654 if (!data)
2655 return -ENOENT;
2656
Johan Hedberg6928a922014-10-26 20:46:09 +01002657 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002658
2659 list_del(&data->list);
2660 kfree(data);
2661
2662 return 0;
2663}
2664
Johan Hedberg35f74982014-02-18 17:14:32 +02002665void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002666{
2667 struct oob_data *data, *n;
2668
2669 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2670 list_del(&data->list);
2671 kfree(data);
2672 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002673}
2674
Marcel Holtmann07988722014-01-10 02:07:29 -08002675int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002676 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002677 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002678{
2679 struct oob_data *data;
2680
Johan Hedberg6928a922014-10-26 20:46:09 +01002681 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002682 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002683 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002684 if (!data)
2685 return -ENOMEM;
2686
2687 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002688 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002689 list_add(&data->list, &hdev->remote_oob_data);
2690 }
2691
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002692 if (hash192 && rand192) {
2693 memcpy(data->hash192, hash192, sizeof(data->hash192));
2694 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002695 if (hash256 && rand256)
2696 data->present = 0x03;
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002697 } else {
2698 memset(data->hash192, 0, sizeof(data->hash192));
2699 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002700 if (hash256 && rand256)
2701 data->present = 0x02;
2702 else
2703 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002704 }
2705
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002706 if (hash256 && rand256) {
2707 memcpy(data->hash256, hash256, sizeof(data->hash256));
2708 memcpy(data->rand256, rand256, sizeof(data->rand256));
2709 } else {
2710 memset(data->hash256, 0, sizeof(data->hash256));
2711 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002712 if (hash192 && rand192)
2713 data->present = 0x01;
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002714 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002715
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002716 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002717
2718 return 0;
2719}
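
/* Added note, derived from the assignments above: data->present is a
 * bitmask describing which OOB value pairs are valid:
 *
 *   0x00 - neither P-192 nor P-256 values are available
 *   0x01 - only the P-192 hash/randomizer pair is available
 *   0x02 - only the P-256 hash/randomizer pair is available
 *   0x03 - both the P-192 and P-256 pairs are available
 */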
2720
Florian Grandeld2609b32015-06-18 03:16:34 +02002721/* This function requires the caller holds hdev->lock */
2722struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2723{
2724 struct adv_info *adv_instance;
2725
2726 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2727 if (adv_instance->instance == instance)
2728 return adv_instance;
2729 }
2730
2731 return NULL;
2732}
2733
2734/* This function requires the caller holds hdev->lock */
2735struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2736 struct adv_info *cur_instance;
2737
2738 cur_instance = hci_find_adv_instance(hdev, instance);
2739 if (!cur_instance)
2740 return NULL;
2741
2742 if (cur_instance == list_last_entry(&hdev->adv_instances,
2743 struct adv_info, list))
2744 return list_first_entry(&hdev->adv_instances,
2745 struct adv_info, list);
2746 else
2747 return list_next_entry(cur_instance, list);
2748}
2749
2750/* This function requires the caller holds hdev->lock */
2751int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2752{
2753 struct adv_info *adv_instance;
2754
2755 adv_instance = hci_find_adv_instance(hdev, instance);
2756 if (!adv_instance)
2757 return -ENOENT;
2758
2759	BT_DBG("%s removing instance %d", hdev->name, instance);
2760
Florian Grandel5d900e42015-06-18 03:16:35 +02002761 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2762 cancel_delayed_work(&hdev->adv_instance_expire);
2763 hdev->adv_instance_timeout = 0;
2764 }
2765
Florian Grandeld2609b32015-06-18 03:16:34 +02002766 list_del(&adv_instance->list);
2767 kfree(adv_instance);
2768
2769 hdev->adv_instance_cnt--;
2770
2771 return 0;
2772}
2773
2774/* This function requires the caller holds hdev->lock */
2775void hci_adv_instances_clear(struct hci_dev *hdev)
2776{
2777 struct adv_info *adv_instance, *n;
2778
Florian Grandel5d900e42015-06-18 03:16:35 +02002779 if (hdev->adv_instance_timeout) {
2780 cancel_delayed_work(&hdev->adv_instance_expire);
2781 hdev->adv_instance_timeout = 0;
2782 }
2783
Florian Grandeld2609b32015-06-18 03:16:34 +02002784 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2785 list_del(&adv_instance->list);
2786 kfree(adv_instance);
2787 }
2788
2789 hdev->adv_instance_cnt = 0;
2790}
2791
2792/* This function requires the caller holds hdev->lock */
2793int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2794 u16 adv_data_len, u8 *adv_data,
2795 u16 scan_rsp_len, u8 *scan_rsp_data,
2796 u16 timeout, u16 duration)
2797{
2798 struct adv_info *adv_instance;
2799
2800 adv_instance = hci_find_adv_instance(hdev, instance);
2801 if (adv_instance) {
2802 memset(adv_instance->adv_data, 0,
2803 sizeof(adv_instance->adv_data));
2804 memset(adv_instance->scan_rsp_data, 0,
2805 sizeof(adv_instance->scan_rsp_data));
2806 } else {
2807 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2808 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2809 return -EOVERFLOW;
2810
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002811 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002812 if (!adv_instance)
2813 return -ENOMEM;
2814
Florian Grandelfffd38b2015-06-18 03:16:47 +02002815 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002816 adv_instance->instance = instance;
2817 list_add(&adv_instance->list, &hdev->adv_instances);
2818 hdev->adv_instance_cnt++;
2819 }
2820
2821 adv_instance->flags = flags;
2822 adv_instance->adv_data_len = adv_data_len;
2823 adv_instance->scan_rsp_len = scan_rsp_len;
2824
2825 if (adv_data_len)
2826 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2827
2828 if (scan_rsp_len)
2829 memcpy(adv_instance->scan_rsp_data,
2830 scan_rsp_data, scan_rsp_len);
2831
2832 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002833 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002834
2835 if (duration == 0)
2836 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2837 else
2838 adv_instance->duration = duration;
2839
2840	BT_DBG("%s for instance %d", hdev->name, instance);
2841
2842 return 0;
2843}
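
/* Added usage sketch (illustrative, not part of the original file):
 * registering a minimal advertising instance carrying only the AD Flags
 * field. The instance number and data bytes are example values; per the
 * comment above hci_add_adv_instance, hdev->lock must be held.
 */
static int __maybe_unused example_add_adv_instance(struct hci_dev *hdev)
{
	/* AD structure: length 0x02, type 0x01 (Flags), value 0x06 */
	u8 adv_data[] = { 0x02, 0x01, 0x06 };
	int err;

	hci_dev_lock(hdev);
	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
				   0, NULL, 0, 0);
	hci_dev_unlock(hdev);

	return err;
}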
2844
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002845struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002846 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002847{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002848 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002849
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002850 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002851 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002852 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002853 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002854
2855 return NULL;
2856}
2857
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002858void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002859{
2860 struct list_head *p, *n;
2861
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002862 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002863 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002864
2865 list_del(p);
2866 kfree(b);
2867 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002868}
2869
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002870int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002871{
2872 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002873
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002874 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002875 return -EBADF;
2876
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002877 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002878 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002879
Johan Hedberg27f70f32014-07-21 10:50:06 +03002880 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002881 if (!entry)
2882 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002883
2884 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002885 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002886
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002887 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002888
2889 return 0;
2890}
2891
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002892int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002893{
2894 struct bdaddr_list *entry;
2895
Johan Hedberg35f74982014-02-18 17:14:32 +02002896 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002897 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002898 return 0;
2899 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002900
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002901 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002902 if (!entry)
2903 return -ENOENT;
2904
2905 list_del(&entry->list);
2906 kfree(entry);
2907
2908 return 0;
2909}
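
/* Added usage sketch (illustrative, not part of the original file): adding
 * a BR/EDR address to the per-device whitelist and removing it again. The
 * function name is an assumption; mgmt normally drives these lists under
 * hdev->lock.
 */
static void __maybe_unused example_whitelist_toggle(struct hci_dev *hdev,
						    bdaddr_t *bdaddr)
{
	hci_dev_lock(hdev);

	if (!hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR))
		hci_bdaddr_list_del(&hdev->whitelist, bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);
}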
2910
Andre Guedes15819a72014-02-03 13:56:18 -03002911/* This function requires the caller holds hdev->lock */
2912struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2913 bdaddr_t *addr, u8 addr_type)
2914{
2915 struct hci_conn_params *params;
2916
2917 list_for_each_entry(params, &hdev->le_conn_params, list) {
2918 if (bacmp(&params->addr, addr) == 0 &&
2919 params->addr_type == addr_type) {
2920 return params;
2921 }
2922 }
2923
2924 return NULL;
2925}
2926
2927/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002928struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2929 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002930{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002931 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002932
Johan Hedberg501f8822014-07-04 12:37:26 +03002933 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002934 if (bacmp(&param->addr, addr) == 0 &&
2935 param->addr_type == addr_type)
2936 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002937 }
2938
2939 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002940}
2941
2942/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002943struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2944 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002945{
2946 struct hci_conn_params *params;
2947
2948 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002949 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002950 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002951
2952 params = kzalloc(sizeof(*params), GFP_KERNEL);
2953 if (!params) {
2954 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002955 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002956 }
2957
2958 bacpy(&params->addr, addr);
2959 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002960
2961 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002962 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002963
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002964 params->conn_min_interval = hdev->le_conn_min_interval;
2965 params->conn_max_interval = hdev->le_conn_max_interval;
2966 params->conn_latency = hdev->le_conn_latency;
2967 params->supervision_timeout = hdev->le_supv_timeout;
2968 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2969
2970 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2971
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002972 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002973}
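
/* Added usage sketch (illustrative, not part of the original file): marking
 * a peer for always-on autoconnection, loosely following the flow used by
 * mgmt. The simplified list handling and the function name are assumptions;
 * the caller must hold hdev->lock.
 */
static int __maybe_unused example_enable_autoconnect(struct hci_dev *hdev,
						     bdaddr_t *addr,
						     u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -ENOMEM;

	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
	list_del_init(&params->action);
	list_add(&params->action, &hdev->pend_le_conns);

	hci_update_background_scan(hdev);

	return 0;
}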
2974
Johan Hedbergf6c63242014-08-15 21:06:59 +03002975static void hci_conn_params_free(struct hci_conn_params *params)
2976{
2977 if (params->conn) {
2978 hci_conn_drop(params->conn);
2979 hci_conn_put(params->conn);
2980 }
2981
2982 list_del(&params->action);
2983 list_del(&params->list);
2984 kfree(params);
2985}
2986
Andre Guedes15819a72014-02-03 13:56:18 -03002987/* This function requires the caller holds hdev->lock */
2988void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2989{
2990 struct hci_conn_params *params;
2991
2992 params = hci_conn_params_lookup(hdev, addr, addr_type);
2993 if (!params)
2994 return;
2995
Johan Hedbergf6c63242014-08-15 21:06:59 +03002996 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002997
Johan Hedberg95305ba2014-07-04 12:37:21 +03002998 hci_update_background_scan(hdev);
2999
Andre Guedes15819a72014-02-03 13:56:18 -03003000 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3001}
3002
3003/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003004void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003005{
3006 struct hci_conn_params *params, *tmp;
3007
3008 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003009 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3010 continue;
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02003011
3012		/* If trying to establish a one-time connection to a disabled
3013		 * device, leave the params but mark them for explicit one-time use.
3014		 */
3015 if (params->explicit_connect) {
3016 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3017 continue;
3018 }
3019
Andre Guedes15819a72014-02-03 13:56:18 -03003020 list_del(&params->list);
3021 kfree(params);
3022 }
3023
Johan Hedberg55af49a82014-07-02 17:37:26 +03003024 BT_DBG("All LE disabled connection parameters were removed");
3025}
3026
3027/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003028void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003029{
3030 struct hci_conn_params *params, *tmp;
3031
Johan Hedbergf6c63242014-08-15 21:06:59 +03003032 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3033 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003034
Johan Hedberga2f41a82014-07-04 12:37:19 +03003035 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003036
Andre Guedes15819a72014-02-03 13:56:18 -03003037 BT_DBG("All LE connection parameters were removed");
3038}
3039
Marcel Holtmann1904a852015-01-11 13:50:44 -08003040static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003041{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003042 if (status) {
3043 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003044
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003045 hci_dev_lock(hdev);
3046 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3047 hci_dev_unlock(hdev);
3048 return;
3049 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003050}
3051
Marcel Holtmann1904a852015-01-11 13:50:44 -08003052static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3053 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003054{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003055 /* General inquiry access code (GIAC) */
3056 u8 lap[3] = { 0x33, 0x8b, 0x9e };
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003057 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003058 int err;
3059
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003060 if (status) {
3061 BT_ERR("Failed to disable LE scanning: status %d", status);
3062 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003063 }
3064
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003065 hdev->discovery.scan_start = 0;
3066
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003067 switch (hdev->discovery.type) {
3068 case DISCOV_TYPE_LE:
3069 hci_dev_lock(hdev);
3070 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3071 hci_dev_unlock(hdev);
3072 break;
3073
3074 case DISCOV_TYPE_INTERLEAVED:
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003075 hci_dev_lock(hdev);
3076
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003077 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3078 &hdev->quirks)) {
3079 /* If we were running LE only scan, change discovery
3080 * state. If we were running both LE and BR/EDR inquiry
3081 * simultaneously, and BR/EDR inquiry is already
3082 * finished, stop discovery, otherwise BR/EDR inquiry
Wesley Kuo177d0502015-05-13 10:33:15 +08003083			 * will stop discovery when it finishes. If we are resolving
 3084			 * a remote device name, do not change the discovery state.
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003085 */
Wesley Kuo177d0502015-05-13 10:33:15 +08003086 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3087 hdev->discovery.state != DISCOVERY_RESOLVING)
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003088 hci_discovery_set_state(hdev,
3089 DISCOVERY_STOPPED);
3090 } else {
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003091 struct hci_request req;
3092
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003093 hci_inquiry_cache_flush(hdev);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003094
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003095 hci_req_init(&req, hdev);
3096
3097 memset(&cp, 0, sizeof(cp));
3098 memcpy(&cp.lap, lap, sizeof(cp.lap));
3099 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3100 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3101
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003102 err = hci_req_run(&req, inquiry_complete);
3103 if (err) {
3104 BT_ERR("Inquiry request failed: err %d", err);
3105 hci_discovery_set_state(hdev,
3106 DISCOVERY_STOPPED);
3107 }
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003108 }
3109
3110 hci_dev_unlock(hdev);
3111 break;
3112 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003113}
3114
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003115static void le_scan_disable_work(struct work_struct *work)
3116{
3117 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003118 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003119 struct hci_request req;
3120 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003121
3122 BT_DBG("%s", hdev->name);
3123
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003124 cancel_delayed_work_sync(&hdev->le_scan_restart);
3125
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003126 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003127
Andre Guedesb1efcc22014-02-26 20:21:40 -03003128 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003129
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003130 err = hci_req_run(&req, le_scan_disable_work_complete);
3131 if (err)
3132 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003133}
3134
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003135static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3136 u16 opcode)
3137{
3138 unsigned long timeout, duration, scan_start, now;
3139
3140 BT_DBG("%s", hdev->name);
3141
3142 if (status) {
3143 BT_ERR("Failed to restart LE scan: status %d", status);
3144 return;
3145 }
3146
3147 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3148 !hdev->discovery.scan_start)
3149 return;
3150
3151	/* When the scan was started, hdev->le_scan_disable was queued to
3152	 * run duration jiffies after scan_start. During the scan restart
3153	 * that job was canceled, so queue it again after the remaining
3154	 * timeout to make sure the scan does not run indefinitely.
3155	 */
3156 duration = hdev->discovery.scan_duration;
3157 scan_start = hdev->discovery.scan_start;
3158 now = jiffies;
3159 if (now - scan_start <= duration) {
3160 int elapsed;
3161
3162 if (now >= scan_start)
3163 elapsed = now - scan_start;
3164 else
3165 elapsed = ULONG_MAX - scan_start + now;
3166
3167 timeout = duration - elapsed;
3168 } else {
3169 timeout = 0;
3170 }
3171 queue_delayed_work(hdev->workqueue,
3172 &hdev->le_scan_disable, timeout);
3173}
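
/* Added worked example for the rescheduling above, using made-up numbers:
 * with scan_duration = 10000 jiffies and a restart happening 4000 jiffies
 * after scan_start, elapsed = 4000 and the disable work is requeued after
 * timeout = 10000 - 4000 = 6000 jiffies. The ULONG_MAX branch only covers
 * jiffies wrapping between scan_start and now.
 */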
3174
3175static void le_scan_restart_work(struct work_struct *work)
3176{
3177 struct hci_dev *hdev = container_of(work, struct hci_dev,
3178 le_scan_restart.work);
3179 struct hci_request req;
3180 struct hci_cp_le_set_scan_enable cp;
3181 int err;
3182
3183 BT_DBG("%s", hdev->name);
3184
3185	/* If the controller is not scanning, we are done. */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003186 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003187 return;
3188
3189 hci_req_init(&req, hdev);
3190
3191 hci_req_add_le_scan_disable(&req);
3192
3193 memset(&cp, 0, sizeof(cp));
3194 cp.enable = LE_SCAN_ENABLE;
3195 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3196 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3197
3198 err = hci_req_run(&req, le_scan_restart_work_complete);
3199 if (err)
3200 BT_ERR("Restart LE scan request failed: err %d", err);
3201}
3202
Johan Hedberga1f4c312014-02-27 14:05:41 +02003203/* Copy the Identity Address of the controller.
3204 *
3205 * If the controller has a public BD_ADDR, then by default use that one.
3206 * If this is a LE only controller without a public address, default to
3207 * the static random address.
3208 *
3209 * For debugging purposes it is possible to force controllers with a
3210 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003211 *
3212 * In case BR/EDR has been disabled on a dual-mode controller and
3213 * userspace has configured a static address, then that address
3214 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02003215 */
3216void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *bdaddr_type)
3218{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07003219 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003220 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003221 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003222 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02003223 bacpy(bdaddr, &hdev->static_addr);
3224 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3225 } else {
3226 bacpy(bdaddr, &hdev->bdaddr);
3227 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3228 }
3229}
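
/* Added usage sketch (illustrative, not part of the original file): logging
 * the identity address that the rules above select for this controller.
 */
static void __maybe_unused example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}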
3230
David Herrmann9be0dab2012-04-22 14:39:57 +02003231/* Alloc HCI device */
3232struct hci_dev *hci_alloc_dev(void)
3233{
3234 struct hci_dev *hdev;
3235
Johan Hedberg27f70f32014-07-21 10:50:06 +03003236 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003237 if (!hdev)
3238 return NULL;
3239
David Herrmannb1b813d2012-04-22 14:39:58 +02003240 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3241 hdev->esco_type = (ESCO_HV1);
3242 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003243 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3244 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003245 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003246 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3247 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
Florian Grandeld2609b32015-06-18 03:16:34 +02003248 hdev->adv_instance_cnt = 0;
3249 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02003250 hdev->adv_instance_timeout = 0;
David Herrmannb1b813d2012-04-22 14:39:58 +02003251
David Herrmannb1b813d2012-04-22 14:39:58 +02003252 hdev->sniff_max_interval = 800;
3253 hdev->sniff_min_interval = 80;
3254
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003255 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003256 hdev->le_adv_min_interval = 0x0800;
3257 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003258 hdev->le_scan_interval = 0x0060;
3259 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003260 hdev->le_conn_min_interval = 0x0028;
3261 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003262 hdev->le_conn_latency = 0x0000;
3263 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01003264 hdev->le_def_tx_len = 0x001b;
3265 hdev->le_def_tx_time = 0x0148;
3266 hdev->le_max_tx_len = 0x001b;
3267 hdev->le_max_tx_time = 0x0148;
3268 hdev->le_max_rx_len = 0x001b;
3269 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003270
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003271 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003272 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003273 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3274 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003275
David Herrmannb1b813d2012-04-22 14:39:58 +02003276 mutex_init(&hdev->lock);
3277 mutex_init(&hdev->req_lock);
3278
3279 INIT_LIST_HEAD(&hdev->mgmt_pending);
3280 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003281 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003282 INIT_LIST_HEAD(&hdev->uuids);
3283 INIT_LIST_HEAD(&hdev->link_keys);
3284 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003285 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003286 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003287 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003288 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003289 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003290 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003291 INIT_LIST_HEAD(&hdev->conn_hash.list);
Florian Grandeld2609b32015-06-18 03:16:34 +02003292 INIT_LIST_HEAD(&hdev->adv_instances);
David Herrmannb1b813d2012-04-22 14:39:58 +02003293
3294 INIT_WORK(&hdev->rx_work, hci_rx_work);
3295 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3296 INIT_WORK(&hdev->tx_work, hci_tx_work);
3297 INIT_WORK(&hdev->power_on, hci_power_on);
Marcel Holtmannc7741d12015-01-28 11:09:55 -08003298 INIT_WORK(&hdev->error_reset, hci_error_reset);
David Herrmannb1b813d2012-04-22 14:39:58 +02003299
David Herrmannb1b813d2012-04-22 14:39:58 +02003300 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3301 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3302 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003303 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Florian Grandel5d900e42015-06-18 03:16:35 +02003304 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
David Herrmannb1b813d2012-04-22 14:39:58 +02003305
David Herrmannb1b813d2012-04-22 14:39:58 +02003306 skb_queue_head_init(&hdev->rx_q);
3307 skb_queue_head_init(&hdev->cmd_q);
3308 skb_queue_head_init(&hdev->raw_q);
3309
3310 init_waitqueue_head(&hdev->req_wait_q);
3311
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003312 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003313
David Herrmannb1b813d2012-04-22 14:39:58 +02003314 hci_init_sysfs(hdev);
3315 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003316
3317 return hdev;
3318}
3319EXPORT_SYMBOL(hci_alloc_dev);
3320
3321/* Free HCI device */
3322void hci_free_dev(struct hci_dev *hdev)
3323{
David Herrmann9be0dab2012-04-22 14:39:57 +02003324 /* will free via device release */
3325 put_device(&hdev->dev);
3326}
3327EXPORT_SYMBOL(hci_free_dev);
3328
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329/* Register HCI device */
3330int hci_register_dev(struct hci_dev *hdev)
3331{
David Herrmannb1b813d2012-04-22 14:39:58 +02003332 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333
Marcel Holtmann74292d52014-07-06 15:50:27 +02003334 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 return -EINVAL;
3336
Mat Martineau08add512011-11-02 16:18:36 -07003337 /* Do not allow HCI_AMP devices to register at index 0,
3338 * so the index can be used as the AMP controller ID.
3339 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003340 switch (hdev->dev_type) {
3341 case HCI_BREDR:
3342 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3343 break;
3344 case HCI_AMP:
3345 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3346 break;
3347 default:
3348 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003350
Sasha Levin3df92b32012-05-27 22:36:56 +02003351 if (id < 0)
3352 return id;
3353
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 sprintf(hdev->name, "hci%d", id);
3355 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003356
3357 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3358
Kees Cookd8537542013-07-03 15:04:57 -07003359 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3360 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003361 if (!hdev->workqueue) {
3362 error = -ENOMEM;
3363 goto err;
3364 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003365
Kees Cookd8537542013-07-03 15:04:57 -07003366 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3367 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003368 if (!hdev->req_workqueue) {
3369 destroy_workqueue(hdev->workqueue);
3370 error = -ENOMEM;
3371 goto err;
3372 }
3373
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003374 if (!IS_ERR_OR_NULL(bt_debugfs))
3375 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3376
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003377 dev_set_name(&hdev->dev, "%s", hdev->name);
3378
3379 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003380 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003381 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003383 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003384 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3385 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003386 if (hdev->rfkill) {
3387 if (rfkill_register(hdev->rfkill) < 0) {
3388 rfkill_destroy(hdev->rfkill);
3389 hdev->rfkill = NULL;
3390 }
3391 }
3392
Johan Hedberg5e130362013-09-13 08:58:17 +03003393 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003394 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003395
Guodong Xu90027962015-05-08 13:55:08 +08003396 bluetooth_led_names(hdev);
3397 bluetooth_led_init(hdev);
3398
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003399 hci_dev_set_flag(hdev, HCI_SETUP);
3400 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003401
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003402 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003403 /* Assume BR/EDR support until proven otherwise (such as
3404 * through reading supported features during init.
3405 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003406 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003407 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003408
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003409 write_lock(&hci_dev_list_lock);
3410 list_add(&hdev->list, &hci_dev_list);
3411 write_unlock(&hci_dev_list_lock);
3412
Marcel Holtmann4a964402014-07-02 19:10:33 +02003413 /* Devices that are marked for raw-only usage are unconfigured
3414 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003415 */
3416 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003417 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003418
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003419 hci_sock_dev_event(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003420 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421
Johan Hedberg19202572013-01-14 22:33:51 +02003422 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003423
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003425
David Herrmann33ca9542011-10-08 14:58:49 +02003426err_wqueue:
3427 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003428 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003429err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003430 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003431
David Herrmann33ca9542011-10-08 14:58:49 +02003432 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433}
3434EXPORT_SYMBOL(hci_register_dev);
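
/* Added driver-side sketch (illustrative, not part of the original file):
 * the minimal transport-driver path to registration. The stub callbacks and
 * probe function are assumptions; a real driver also fills in hdev->bus,
 * driver data and quirks as appropriate.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}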
3435
3436/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003437void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438{
Marcel Holtmann2d7cc192015-04-04 21:59:27 -07003439 int id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003440
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003441 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003443 hci_dev_set_flag(hdev, HCI_UNREGISTER);
Johan Hovold94324962012-03-15 14:48:41 +01003444
Sasha Levin3df92b32012-05-27 22:36:56 +02003445 id = hdev->id;
3446
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003447 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003449 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450
3451 hci_dev_do_close(hdev);
3452
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003453 cancel_work_sync(&hdev->power_on);
3454
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003455 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003456 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3457 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003458 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003459 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003460 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003461 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003462
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003463 /* mgmt_index_removed should take care of emptying the
3464 * pending list */
3465 BUG_ON(!list_empty(&hdev->mgmt_pending));
3466
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003467 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468
Guodong Xu90027962015-05-08 13:55:08 +08003469 bluetooth_led_exit(hdev);
3470
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003471 if (hdev->rfkill) {
3472 rfkill_unregister(hdev->rfkill);
3473 rfkill_destroy(hdev->rfkill);
3474 }
3475
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003476 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003477
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003478 debugfs_remove_recursive(hdev->debugfs);
3479
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003480 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003481 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003482
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003483 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003484 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003485 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003486 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003487 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003488 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003489 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003490 hci_remote_oob_data_clear(hdev);
Florian Grandeld2609b32015-06-18 03:16:34 +02003491 hci_adv_instances_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003492 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003493 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003494 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003495 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003496
David Herrmanndc946bd2012-01-07 15:47:24 +01003497 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003498
3499 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500}
3501EXPORT_SYMBOL(hci_unregister_dev);
3502
3503/* Suspend HCI device */
3504int hci_suspend_dev(struct hci_dev *hdev)
3505{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003506 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 return 0;
3508}
3509EXPORT_SYMBOL(hci_suspend_dev);
3510
3511/* Resume HCI device */
3512int hci_resume_dev(struct hci_dev *hdev)
3513{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003514 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 return 0;
3516}
3517EXPORT_SYMBOL(hci_resume_dev);
3518
Marcel Holtmann75e05692014-11-02 08:15:38 +01003519/* Reset HCI device */
3520int hci_reset_dev(struct hci_dev *hdev)
3521{
3522 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3523 struct sk_buff *skb;
3524
3525 skb = bt_skb_alloc(3, GFP_ATOMIC);
3526 if (!skb)
3527 return -ENOMEM;
3528
3529 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3530 memcpy(skb_put(skb, 3), hw_err, 3);
3531
3532 /* Send Hardware Error to upper stack */
3533 return hci_recv_frame(hdev, skb);
3534}
3535EXPORT_SYMBOL(hci_reset_dev);
3536
Marcel Holtmann76bca882009-11-18 00:40:39 +01003537/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003538int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003539{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003540 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003541 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003542 kfree_skb(skb);
3543 return -ENXIO;
3544 }
3545
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003546 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3547 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3548 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3549 kfree_skb(skb);
3550 return -EINVAL;
3551 }
3552
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003553 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003554 bt_cb(skb)->incoming = 1;
3555
3556 /* Time stamp */
3557 __net_timestamp(skb);
3558
Marcel Holtmann76bca882009-11-18 00:40:39 +01003559 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003560 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003561
Guodong Xu90027962015-05-08 13:55:08 +08003562 bluetooth_led_rx(hdev);
3563
Marcel Holtmann76bca882009-11-18 00:40:39 +01003564 return 0;
3565}
3566EXPORT_SYMBOL(hci_recv_frame);
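
/* Added driver-side sketch (illustrative, not part of the original file):
 * handing a raw event buffer received from the transport to the core. The
 * function name and buffer parameters are assumptions.
 */
static int __maybe_unused example_driver_rx_event(struct hci_dev *hdev,
						  const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);
}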
3567
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003568/* Receive diagnostic message from HCI drivers */
3569int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3570{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003571 /* Mark as diagnostic packet */
3572 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3573
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003574 /* Time stamp */
3575 __net_timestamp(skb);
3576
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003577 skb_queue_tail(&hdev->rx_q, skb);
3578 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003579
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003580 return 0;
3581}
3582EXPORT_SYMBOL(hci_recv_diag);
3583
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584/* ---- Interface to upper protocols ---- */
3585
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586int hci_register_cb(struct hci_cb *cb)
3587{
3588 BT_DBG("%p name %s", cb, cb->name);
3589
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003590 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003591 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003592 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
3594 return 0;
3595}
3596EXPORT_SYMBOL(hci_register_cb);
3597
3598int hci_unregister_cb(struct hci_cb *cb)
3599{
3600 BT_DBG("%p name %s", cb, cb->name);
3601
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003602 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003604 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605
3606 return 0;
3607}
3608EXPORT_SYMBOL(hci_unregister_cb);
3609
Marcel Holtmann51086992013-10-10 14:54:19 -07003610static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003612 int err;
3613
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003614 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003616 /* Time stamp */
3617 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003619 /* Send copy to monitor */
3620 hci_send_to_monitor(hdev, skb);
3621
3622 if (atomic_read(&hdev->promisc)) {
3623 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003624 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 }
3626
3627 /* Get rid of skb owner, prior to sending to the driver. */
3628 skb_orphan(skb);
3629
Marcel Holtmann73d0d3c2015-10-04 23:34:01 +02003630 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3631 kfree_skb(skb);
3632 return;
3633 }
3634
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003635 err = hdev->send(hdev, skb);
3636 if (err < 0) {
3637 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3638 kfree_skb(skb);
3639 }
Guodong Xu90027962015-05-08 13:55:08 +08003640
3641 bluetooth_led_tx(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642}
3643
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003644/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003645int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3646 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003647{
3648 struct sk_buff *skb;
3649
3650 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3651
3652 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3653 if (!skb) {
3654 BT_ERR("%s no memory for command", hdev->name);
3655 return -ENOMEM;
3656 }
3657
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003658 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003659 * single-command requests.
3660 */
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01003661 bt_cb(skb)->hci.req_start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003662
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003664 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665
3666 return 0;
3667}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668
3669/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003670void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671{
3672 struct hci_command_hdr *hdr;
3673
3674 if (!hdev->sent_cmd)
3675 return NULL;
3676
3677 hdr = (void *) hdev->sent_cmd->data;
3678
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003679 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 return NULL;
3681
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003682 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
3684 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3685}
3686
Loic Poulainfbef1682015-09-29 15:05:44 +02003687/* Send HCI command and wait for Command Complete event */
3688struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3689 const void *param, u32 timeout)
3690{
3691 struct sk_buff *skb;
3692
3693 if (!test_bit(HCI_UP, &hdev->flags))
3694 return ERR_PTR(-ENETDOWN);
3695
3696 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3697
3698 hci_req_lock(hdev);
3699 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3700 hci_req_unlock(hdev);
3701
3702 return skb;
3703}
3704EXPORT_SYMBOL(hci_cmd_sync);
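
/* Added usage sketch (illustrative, not part of the original file): issuing
 * Read Local Version synchronously. HCI_CMD_TIMEOUT comes from hci.h; the
 * error handling is reduced to the minimum.
 */
static int __maybe_unused example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (struct hci_rp_read_local_version *)skb->data;
	if (!rp->status)
		BT_DBG("%s HCI version %u revision %u", hdev->name,
		       rp->hci_ver, __le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}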
3705
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706/* Send ACL data */
3707static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3708{
3709 struct hci_acl_hdr *hdr;
3710 int len = skb->len;
3711
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003712 skb_push(skb, HCI_ACL_HDR_SIZE);
3713 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003714 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003715 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3716 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717}
3718
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003719static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003720 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003722 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 struct hci_dev *hdev = conn->hdev;
3724 struct sk_buff *list;
3725
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003726 skb->len = skb_headlen(skb);
3727 skb->data_len = 0;
3728
3729 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003730
3731 switch (hdev->dev_type) {
3732 case HCI_BREDR:
3733 hci_add_acl_hdr(skb, conn->handle, flags);
3734 break;
3735 case HCI_AMP:
3736 hci_add_acl_hdr(skb, chan->handle, flags);
3737 break;
3738 default:
3739 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3740 return;
3741 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003742
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003743 list = skb_shinfo(skb)->frag_list;
3744 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 /* Non fragmented */
3746 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3747
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003748 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 } else {
3750 /* Fragmented */
3751 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3752
3753 skb_shinfo(skb)->frag_list = NULL;
3754
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003755		/* Queue all fragments atomically. We need to use spin_lock_bh
3756		 * here because with 6LoWPAN links this function can be called
3757		 * from softirq context, and taking a plain spin lock there
3758		 * could deadlock.
3759		 */
3760 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003762 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003763
3764 flags &= ~ACL_START;
3765 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 do {
3767 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003768
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003769 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003770 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771
3772 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3773
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003774 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775 } while (list);
3776
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003777 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003779}
3780
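/* Recap of the fragmentation path above: only the head skb keeps the
 * caller's flags (typically ACL_START); every buffer chained on
 * skb_shinfo(skb)->frag_list is re-tagged ACL_CONT before queueing, so
 * the controller sees one start fragment followed by continuations.
 */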
3781void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3782{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003783 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003784
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003785 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003786
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003787 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003789 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
3792/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003793void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794{
3795 struct hci_dev *hdev = conn->hdev;
3796 struct hci_sco_hdr hdr;
3797
3798 BT_DBG("%s len %d", hdev->name, skb->len);
3799
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003800 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 hdr.dlen = skb->len;
3802
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003803 skb_push(skb, HCI_SCO_HDR_SIZE);
3804 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003805 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003807 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003808
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003810 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812
3813/* ---- HCI TX task (outgoing data) ---- */
3814
3815/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003816static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3817 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818{
3819 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003820 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003821 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003823 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003825
3826 rcu_read_lock();
3827
3828 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003829 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003831
3832 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3833 continue;
3834
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 num++;
3836
3837 if (c->sent < min) {
3838 min = c->sent;
3839 conn = c;
3840 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003841
3842 if (hci_conn_num(hdev, type) == num)
3843 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 }
3845
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003846 rcu_read_unlock();
3847
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003849 int cnt, q;
3850
3851 switch (conn->type) {
3852 case ACL_LINK:
3853 cnt = hdev->acl_cnt;
3854 break;
3855 case SCO_LINK:
3856 case ESCO_LINK:
3857 cnt = hdev->sco_cnt;
3858 break;
3859 case LE_LINK:
3860 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3861 break;
3862 default:
3863 cnt = 0;
3864 BT_ERR("Unknown link type");
3865 }
3866
3867 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003868 *quote = q ? q : 1;
3869 } else
3870 *quote = 0;
3871
3872 BT_DBG("conn %p quote %d", conn, *quote);
3873 return conn;
3874}
3875
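/* Worked example of the quota math above: with three ACL connections
 * holding queued data, num == 3; if hdev->acl_cnt == 7 then
 * q == 7 / 3 == 2, so the least-busy connection may send two frames
 * this round. When cnt / num truncates to zero the scheduler still
 * grants one frame (*quote = 1) so low-credit links keep making
 * progress.
 */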
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003876static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877{
3878 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003879 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880
Ville Tervobae1f5d92011-02-10 22:38:53 -03003881 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003883 rcu_read_lock();
3884
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003886 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003887 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003888 BT_ERR("%s killing stalled connection %pMR",
3889 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003890 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 }
3892 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003893
3894 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895}
3896
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003897static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3898 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003899{
3900 struct hci_conn_hash *h = &hdev->conn_hash;
3901 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003902 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003903 struct hci_conn *conn;
3904 int cnt, q, conn_num = 0;
3905
3906 BT_DBG("%s", hdev->name);
3907
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003908 rcu_read_lock();
3909
3910 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003911 struct hci_chan *tmp;
3912
3913 if (conn->type != type)
3914 continue;
3915
3916 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3917 continue;
3918
3919 conn_num++;
3920
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003921 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003922 struct sk_buff *skb;
3923
3924 if (skb_queue_empty(&tmp->data_q))
3925 continue;
3926
3927 skb = skb_peek(&tmp->data_q);
3928 if (skb->priority < cur_prio)
3929 continue;
3930
3931 if (skb->priority > cur_prio) {
3932 num = 0;
3933 min = ~0;
3934 cur_prio = skb->priority;
3935 }
3936
3937 num++;
3938
3939 if (conn->sent < min) {
3940 min = conn->sent;
3941 chan = tmp;
3942 }
3943 }
3944
3945 if (hci_conn_num(hdev, type) == conn_num)
3946 break;
3947 }
3948
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003949 rcu_read_unlock();
3950
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003951 if (!chan)
3952 return NULL;
3953
3954 switch (chan->conn->type) {
3955 case ACL_LINK:
3956 cnt = hdev->acl_cnt;
3957 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003958 case AMP_LINK:
3959 cnt = hdev->block_cnt;
3960 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003961 case SCO_LINK:
3962 case ESCO_LINK:
3963 cnt = hdev->sco_cnt;
3964 break;
3965 case LE_LINK:
3966 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3967 break;
3968 default:
3969 cnt = 0;
3970 BT_ERR("Unknown link type");
3971 }
3972
3973 q = cnt / num;
3974 *quote = q ? q : 1;
3975 BT_DBG("chan %p quote %d", chan, *quote);
3976 return chan;
3977}
3978
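/* Worked example of the channel pick above: if chan A's head skb has
 * priority 5 on a connection with 2 frames in flight, and chan B's has
 * priority 6 with 9 in flight, B wins because a higher peeked priority
 * resets num and min; among channels at the same priority, the one
 * whose connection has the fewest unacknowledged frames (conn->sent)
 * is selected.
 */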
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003979static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3980{
3981 struct hci_conn_hash *h = &hdev->conn_hash;
3982 struct hci_conn *conn;
3983 int num = 0;
3984
3985 BT_DBG("%s", hdev->name);
3986
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003987 rcu_read_lock();
3988
3989 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003990 struct hci_chan *chan;
3991
3992 if (conn->type != type)
3993 continue;
3994
3995 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3996 continue;
3997
3998 num++;
3999
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004000 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004001 struct sk_buff *skb;
4002
4003 if (chan->sent) {
4004 chan->sent = 0;
4005 continue;
4006 }
4007
4008 if (skb_queue_empty(&chan->data_q))
4009 continue;
4010
4011 skb = skb_peek(&chan->data_q);
4012 if (skb->priority >= HCI_PRIO_MAX - 1)
4013 continue;
4014
4015 skb->priority = HCI_PRIO_MAX - 1;
4016
4017 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004018 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004019 }
4020
4021 if (hci_conn_num(hdev, type) == num)
4022 break;
4023 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004024
4025 rcu_read_unlock();
4026
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004027}
4028
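/* Note on the promotion above: after a send round, channels that moved
 * data simply get chan->sent cleared, while channels that sent nothing
 * have their head skb bumped to HCI_PRIO_MAX - 1, so traffic starved by
 * higher-priority channels wins the next hci_chan_sent() selection.
 */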
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004029static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4030{
4031 /* Calculate count of blocks used by this packet */
4032 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4033}
4034
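/* Worked example of the block count above: with hdev->block_len == 64
 * and a 340-byte skb (4-byte ACL header plus 336 bytes of payload),
 * DIV_ROUND_UP(336, 64) == 6 controller buffer blocks are consumed.
 */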
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004035static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004037 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 /* ACL tx timeout must be longer than maximum
4039 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004040 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004041 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004042 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004044}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045
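/* The timeout tested above, HCI_ACL_TX_TIMEOUT, is 45 seconds, safely
 * above the 40.9 second worst-case link supervision timeout, so stalled
 * links are only killed once the baseband has definitely given up.
 */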
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004046static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004047{
4048 unsigned int cnt = hdev->acl_cnt;
4049 struct hci_chan *chan;
4050 struct sk_buff *skb;
4051 int quote;
4052
4053 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004054
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004055 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004056 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004057 u32 priority = (skb_peek(&chan->data_q))->priority;
4058 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004059 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004060 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004061
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004062 /* Stop if priority has changed */
4063 if (skb->priority < priority)
4064 break;
4065
4066 skb = skb_dequeue(&chan->data_q);
4067
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004068 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004069 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004070
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004071 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072 hdev->acl_last_tx = jiffies;
4073
4074 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004075 chan->sent++;
4076 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 }
4078 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004079
4080 if (cnt != hdev->acl_cnt)
4081 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082}
4083
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004084static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004085{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004086 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004087 struct hci_chan *chan;
4088 struct sk_buff *skb;
4089 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004090 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004091
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004092 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004093
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004094 BT_DBG("%s", hdev->name);
4095
4096 if (hdev->dev_type == HCI_AMP)
4097 type = AMP_LINK;
4098 else
4099 type = ACL_LINK;
4100
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004101 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004102 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004103 u32 priority = (skb_peek(&chan->data_q))->priority;
4104 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4105 int blocks;
4106
4107 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004108 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004109
4110 /* Stop if priority has changed */
4111 if (skb->priority < priority)
4112 break;
4113
4114 skb = skb_dequeue(&chan->data_q);
4115
4116 blocks = __get_blocks(hdev, skb);
4117 if (blocks > hdev->block_cnt)
4118 return;
4119
4120 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004121 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004122
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004123 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004124 hdev->acl_last_tx = jiffies;
4125
4126 hdev->block_cnt -= blocks;
4127 quote -= blocks;
4128
4129 chan->sent += blocks;
4130 chan->conn->sent += blocks;
4131 }
4132 }
4133
4134 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004135 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004136}
4137
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004138static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004139{
4140 BT_DBG("%s", hdev->name);
4141
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004142 /* No ACL link over BR/EDR controller */
4143 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4144 return;
4145
4146 /* No AMP link over AMP controller */
4147 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004148 return;
4149
4150 switch (hdev->flow_ctl_mode) {
4151 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4152 hci_sched_acl_pkt(hdev);
4153 break;
4154
4155 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4156 hci_sched_acl_blk(hdev);
4157 break;
4158 }
4159}
4160
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004162static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163{
4164 struct hci_conn *conn;
4165 struct sk_buff *skb;
4166 int quote;
4167
4168 BT_DBG("%s", hdev->name);
4169
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004170 if (!hci_conn_num(hdev, SCO_LINK))
4171 return;
4172
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4174 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4175 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004176 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177
4178 conn->sent++;
4179 if (conn->sent == ~0)
4180 conn->sent = 0;
4181 }
4182 }
4183}
4184
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004185static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004186{
4187 struct hci_conn *conn;
4188 struct sk_buff *skb;
4189 int quote;
4190
4191 BT_DBG("%s", hdev->name);
4192
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004193 if (!hci_conn_num(hdev, ESCO_LINK))
4194 return;
4195
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004196 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4197 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004198 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4199 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004200 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004201
4202 conn->sent++;
4203 if (conn->sent == ~0)
4204 conn->sent = 0;
4205 }
4206 }
4207}
4208
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004209static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004210{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004211 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004212 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004213 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004214
4215 BT_DBG("%s", hdev->name);
4216
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004217 if (!hci_conn_num(hdev, LE_LINK))
4218 return;
4219
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004220 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004221 /* LE tx timeout must be longer than maximum
4222 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004223 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004224 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004225 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004226 }
4227
4228 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004229 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004230 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004231 u32 priority = (skb_peek(&chan->data_q))->priority;
4232 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004233 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004234 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004235
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004236 /* Stop if priority has changed */
4237 if (skb->priority < priority)
4238 break;
4239
4240 skb = skb_dequeue(&chan->data_q);
4241
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004242 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004243 hdev->le_last_tx = jiffies;
4244
4245 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004246 chan->sent++;
4247 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004248 }
4249 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004250
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004251 if (hdev->le_pkts)
4252 hdev->le_cnt = cnt;
4253 else
4254 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004255
4256 if (cnt != tmp)
4257 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004258}
4259
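/* Note on the credit bookkeeping above: controllers that report no
 * dedicated LE buffers (hdev->le_pkts == 0) share the BR/EDR ACL pool,
 * which is why the tail of the loop writes the remaining credits back
 * to hdev->acl_cnt instead of hdev->le_cnt in that case.
 */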
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004260static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004262 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 struct sk_buff *skb;
4264
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004265 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004266 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004268 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07004269 /* Schedule queues and send stuff to HCI driver */
4270 hci_sched_acl(hdev);
4271 hci_sched_sco(hdev);
4272 hci_sched_esco(hdev);
4273 hci_sched_le(hdev);
4274 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004275
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 /* Send next queued raw (unknown type) packet */
4277 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004278 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279}
4280
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004281/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282
4283/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004284static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285{
4286 struct hci_acl_hdr *hdr = (void *) skb->data;
4287 struct hci_conn *conn;
4288 __u16 handle, flags;
4289
4290 skb_pull(skb, HCI_ACL_HDR_SIZE);
4291
4292 handle = __le16_to_cpu(hdr->handle);
4293 flags = hci_flags(handle);
4294 handle = hci_handle(handle);
4295
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004296 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004297 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298
4299 hdev->stat.acl_rx++;
4300
4301 hci_dev_lock(hdev);
4302 conn = hci_conn_hash_lookup_handle(hdev, handle);
4303 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004304
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004306 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004307
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004309 l2cap_recv_acldata(conn, skb, flags);
4310 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004312 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004313 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 }
4315
4316 kfree_skb(skb);
4317}
4318
4319/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004320static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321{
4322 struct hci_sco_hdr *hdr = (void *) skb->data;
4323 struct hci_conn *conn;
4324 __u16 handle;
4325
4326 skb_pull(skb, HCI_SCO_HDR_SIZE);
4327
4328 handle = __le16_to_cpu(hdr->handle);
4329
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004330 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331
4332 hdev->stat.sco_rx++;
4333
4334 hci_dev_lock(hdev);
4335 conn = hci_conn_hash_lookup_handle(hdev, handle);
4336 hci_dev_unlock(hdev);
4337
4338 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004340 sco_recv_scodata(conn, skb);
4341 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004343 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004344 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 }
4346
4347 kfree_skb(skb);
4348}
4349
Johan Hedberg9238f362013-03-05 20:37:48 +02004350static bool hci_req_is_complete(struct hci_dev *hdev)
4351{
4352 struct sk_buff *skb;
4353
4354 skb = skb_peek(&hdev->cmd_q);
4355 if (!skb)
4356 return true;
4357
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004358 return bt_cb(skb)->hci.req_start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004359}
4360
Johan Hedberg42c6b122013-03-05 20:37:49 +02004361static void hci_resend_last(struct hci_dev *hdev)
4362{
4363 struct hci_command_hdr *sent;
4364 struct sk_buff *skb;
4365 u16 opcode;
4366
4367 if (!hdev->sent_cmd)
4368 return;
4369
4370 sent = (void *) hdev->sent_cmd->data;
4371 opcode = __le16_to_cpu(sent->opcode);
4372 if (opcode == HCI_OP_RESET)
4373 return;
4374
4375 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4376 if (!skb)
4377 return;
4378
4379 skb_queue_head(&hdev->cmd_q, skb);
4380 queue_work(hdev->workqueue, &hdev->cmd_work);
4381}
4382
Johan Hedberge62144872015-04-02 13:41:08 +03004383void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4384 hci_req_complete_t *req_complete,
4385 hci_req_complete_skb_t *req_complete_skb)
Johan Hedberg9238f362013-03-05 20:37:48 +02004386{
Johan Hedberg9238f362013-03-05 20:37:48 +02004387 struct sk_buff *skb;
4388 unsigned long flags;
4389
4390 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4391
Johan Hedberg42c6b122013-03-05 20:37:49 +02004392 /* If the completed command doesn't match the last one that was
4393 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004394 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004395 if (!hci_sent_cmd_data(hdev, opcode)) {
4396 /* Some CSR based controllers generate a spontaneous
4397 * reset complete event during init and any pending
4398 * command will never be completed. In such a case we
4399 * need to resend whatever was the last sent
4400 * command.
4401 */
4402 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4403 hci_resend_last(hdev);
4404
Johan Hedberg9238f362013-03-05 20:37:48 +02004405 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004406 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004407
4408 /* If the command succeeded and there's still more commands in
4409 * this request the request is not yet complete.
4410 */
4411 if (!status && !hci_req_is_complete(hdev))
4412 return;
4413
4414 /* If this was the last command in a request the complete
4415 * callback would be found in hdev->sent_cmd instead of the
4416 * command queue (hdev->cmd_q).
4417 */
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004418 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4419 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
Johan Hedberge62144872015-04-02 13:41:08 +03004420 return;
4421 }
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004422
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004423 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4424 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
Johan Hedberge62144872015-04-02 13:41:08 +03004425 return;
Johan Hedberg9238f362013-03-05 20:37:48 +02004426 }
4427
4428 /* Remove all pending commands belonging to this request */
4429 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4430 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004431 if (bt_cb(skb)->hci.req_start) {
Johan Hedberg9238f362013-03-05 20:37:48 +02004432 __skb_queue_head(&hdev->cmd_q, skb);
4433 break;
4434 }
4435
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004436 *req_complete = bt_cb(skb)->hci.req_complete;
4437 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
Johan Hedberg9238f362013-03-05 20:37:48 +02004438 kfree_skb(skb);
4439 }
4440 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
Johan Hedberg9238f362013-03-05 20:37:48 +02004441}
4442
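/* Usage sketch (hypothetical caller, modelled on the event handling in
 * hci_event.c): resolve the per-request callbacks and invoke whichever
 * one was registered.
 */
static void example_finish_request(struct hci_dev *hdev, u16 opcode,
				   u8 status, struct sk_buff *skb)
{
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;

	hci_req_cmd_complete(hdev, opcode, status, &req_complete,
			     &req_complete_skb);

	if (req_complete)
		req_complete(hdev, status, opcode);
	else if (req_complete_skb)
		req_complete_skb(hdev, status, opcode, skb);
}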
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004443static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004445 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004446 struct sk_buff *skb;
4447
4448 BT_DBG("%s", hdev->name);
4449
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004451 /* Send copy to monitor */
4452 hci_send_to_monitor(hdev, skb);
4453
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 if (atomic_read(&hdev->promisc)) {
4455 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004456 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 }
4458
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004459 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460 kfree_skb(skb);
4461 continue;
4462 }
4463
4464 if (test_bit(HCI_INIT, &hdev->flags)) {
4465			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004466 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467 case HCI_ACLDATA_PKT:
4468 case HCI_SCODATA_PKT:
4469 kfree_skb(skb);
4470 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004471 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 }
4473
4474 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004475 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004477 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 hci_event_packet(hdev, skb);
4479 break;
4480
4481 case HCI_ACLDATA_PKT:
4482 BT_DBG("%s ACL data packet", hdev->name);
4483 hci_acldata_packet(hdev, skb);
4484 break;
4485
4486 case HCI_SCODATA_PKT:
4487 BT_DBG("%s SCO data packet", hdev->name);
4488 hci_scodata_packet(hdev, skb);
4489 break;
4490
4491 default:
4492 kfree_skb(skb);
4493 break;
4494 }
4495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496}
4497
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004498static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004500 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 struct sk_buff *skb;
4502
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004503 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4504 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004507 if (atomic_read(&hdev->cmd_cnt)) {
4508 skb = skb_dequeue(&hdev->cmd_q);
4509 if (!skb)
4510 return;
4511
Wei Yongjun7585b972009-02-25 18:29:52 +08004512 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004514 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004515 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004517 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004518 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004519 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004520 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004521 schedule_delayed_work(&hdev->cmd_timer,
4522 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523 } else {
4524 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004525 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526 }
4527 }
4528}