/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

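/* Expose the controller's Device Under Test mode. Reading reports 'Y'
 * or 'N' based on the HCI_DUT_MODE debug flag; writing a boolean
 * toggles the mode by sending HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET
 * to leave it again) while the device is up.
 */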
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

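/* Dump all locally supported feature pages (and the LE feature page on
 * LE capable controllers) as hex bytes, one page per line.
 */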
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

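/* List the UUIDs of all registered services, one per line. */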
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

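/* Dump the current inquiry cache: one line per discovered device with
 * its address, page scan parameters, device class, clock offset, RSSI,
 * SSP mode and entry timestamp.
 */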
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

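/* List all stored BR/EDR link keys: address, key type, key value and
 * PIN length.
 */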
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

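/* Get/set the delay used when auto-accepting incoming connection
 * requests; the written value is not range checked.
 */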
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

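/* Debug switch to force Secure Connections support. It can only be
 * changed while the device is down, and toggling it just flips the
 * HCI_FORCE_SC debug flag.
 */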
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

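/* Idle timeout in milliseconds; 0 disables it, otherwise values from
 * 500 up to 3600000 (one hour) are accepted.
 */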
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

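/* Sniff mode interval bounds. Both values must be non-zero and even,
 * and the minimum may never exceed the maximum.
 */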
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

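/* Bounds for how long cached connection information (e.g. RSSI and TX
 * power) is considered valid. The minimum age may never exceed the
 * maximum age.
 */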
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

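/* Show the identity address, its type, the local IRK and the current
 * RPA of the controller on a single line.
 */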
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

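/* List the current contents of the LE white list, one address (with
 * its type) per line.
 */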
static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

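/* List all stored SMP long term keys: address and address type,
 * authentication and key type, encryption size, EDIV, Rand and the
 * key value itself.
 */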
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

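/* LE connection interval bounds. The valid range 0x0006-0x0c80 comes
 * from the Bluetooth Core Specification (units of 1.25 ms), and the
 * minimum may never exceed the maximum.
 */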
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

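/* Take ownership of the last received event and verify that it matches
 * the expected event (if given) or the Command Complete event for the
 * given opcode. Returns the skb on a match, ERR_PTR(-ENODATA) otherwise.
 */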
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

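/* Send a single HCI command and block until the matching event arrives
 * or the timeout expires. Callers are expected to serialize via
 * hci_req_lock(); a minimal usage sketch (mirroring dut_mode_write
 * above):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */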
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

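/* Stage-one init for BR/EDR controllers: select packet-based flow
 * control and read the local features, local version and BD address.
 */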
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

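/* First stage of controller initialization: reset the controller
 * (unless HCI_QUIRK_RESET_ON_CLOSE is set) and run the transport
 * specific init for BR/EDR or AMP devices.
 */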
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001442static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001443{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001444 struct hci_dev *hdev = req->hdev;
1445
Johan Hedberg2177bab2013-03-05 20:37:43 +02001446 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001447 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001450
1451 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001452 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001453
Johan Hedberg42c6b122013-03-05 20:37:49 +02001454 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001455
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1458 */
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001461
1462 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1468 */
1469 hdev->max_page = 0x01;
1470
Johan Hedberg2177bab2013-03-05 20:37:43 +02001471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001475 } else {
1476 struct hci_cp_write_eir cp;
1477
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1480
Johan Hedberg42c6b122013-03-05 20:37:49 +02001481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001482 }
1483 }
1484
1485 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001487
1488 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001490
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1493
1494 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001497 }
1498
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001503 }
1504}
1505
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001507{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1511
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1520
1521 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523}
1524
Johan Hedberg42c6b122013-03-05 20:37:49 +02001525static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001526{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528 struct hci_cp_write_le_host_supported cp;
1529
Johan Hedbergc73eee92013-04-19 18:35:21 +03001530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1533
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534 memset(&cp, 0, sizeof(cp));
1535
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1539 }
1540
1541 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544}
1545
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

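/* Third stage init: stored link key cleanup, default link policy,
 * LE event mask and host support, and the remaining extended
 * feature pages.
 */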
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

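/* Fourth and final stage init: event mask page 2, synchronization
 * train parameters and Secure Connections support, each guarded by
 * the corresponding capability or configuration check.
 */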
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

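/* Run the staged init requests in sequence and, during the initial
 * setup phase only, create the debugfs entries matching the
 * controller's capabilities.
 */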
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

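/* Simple one-command request helpers used by hci_dev_cmd() below.
 * Each queues a single HCI command carrying the option value given
 * by the caller.
 */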
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

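/* Inquiry cache helpers: flush all entries and look them up by
 * address, by unknown-name state or by name-resolution state.
 */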
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

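/* Add or refresh the inquiry cache entry for a discovered device and
 * return the device-found flags (legacy pairing, confirm name) that
 * apply to it.
 */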
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

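/* Inquiry request from userspace: run a new inquiry if the cache is
 * stale or a flush was requested, then copy the cached responses
 * back to the caller.
 */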
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

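/* Bring the device up: run the driver setup and the staged HCI init
 * sequence, and tear everything back down again if any step fails.
 */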
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

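/* Take the device down: cancel pending work, flush the queues and
 * caches, reset the controller if required and clear all volatile
 * state.
 */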
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

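/* Reset the device on behalf of userspace: drop the queues, flush
 * the caches and issue an HCI Reset without a full close/open cycle.
 */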
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

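/* Handle the HCISET* device commands that tweak individual settings
 * such as authentication, encryption, scan mode and packet types.
 */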
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

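/* Deferred power-on work: open the device, re-check the error
 * conditions that were ignored during setup and, if the device was
 * powered on automatically, arm the auto power-off timer.
 */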
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static bool ltk_type_master(u8 type)
{
	return (type == SMP_LTK);
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
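
/* Usage sketch (illustrative, not a fixed API contract): an LE advertising
 * report handler holding hdev->lock could resolve an advertiser's RPA back
 * to its identity address roughly like this, with "addr" and "addr_type"
 * standing in for the report fields:
 *
 *	struct smp_irk *irk;
 *
 *	irk = hci_find_irk_by_rpa(hdev, addr);
 *	if (irk) {
 *		bacpy(addr, &irk->bdaddr);
 *		*addr_type = irk->addr_type;
 *	}
 */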

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
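
/* Usage sketch (illustrative): a Link Key Notification handler running
 * under hdev->lock might store the key and let "persistent" decide whether
 * userspace should write it to permanent storage ("ev" is a hypothetical
 * event payload pointer):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, conn->pin_length, &persistent);
 *	if (!key)
 *		return;
 */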

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
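
/* Usage sketch (illustrative): an SMP key-distribution path holding
 * hdev->lock could store a received LTK like this ("authenticated", "tk",
 * "enc_size", "ediv" and "rand" come from the pairing context):
 *
 *	struct smp_ltk *ltk;
 *
 *	ltk = hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK,
 *			  authenticated, tk, enc_size, ediv, rand);
 *	if (!ltk)
 *		return -ENOMEM;
 */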

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
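
/* Usage sketch (illustrative): an unpair operation could drop all key
 * material for a given identity address in one go:
 *
 *	hci_remove_ltk(hdev, &bdaddr, addr_type);
 *	hci_remove_irk(hdev, &bdaddr, addr_type);
 *	hci_remove_link_key(hdev, &bdaddr);
 */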

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
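
/* A sketch of how this timeout is armed (an assumption drawn from the
 * command transmit path, which is not shown here): each time a command is
 * handed to the driver, the delayed work is rescheduled so that a lost
 * Command Complete/Status event eventually unblocks the command queue:
 *
 *	schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
 */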

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return 0;
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
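
/* Usage sketch (illustrative; hdev->lock held by the caller): rejecting
 * connections from one LE peer and undoing it later:
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_LE_PUBLIC);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_LE_PUBLIC);
 *
 * Note that passing BDADDR_ANY to hci_blacklist_del() clears the whole
 * list instead of removing a single entry.
 */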

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");

	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
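
/* Usage sketch (illustrative; caller holds hdev->lock): marking a bonded
 * peer for automatic reconnection whenever it becomes available:
 *
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *
 * Unless the peer is already connected, this also places it on the
 * pending LE connection list, which re-triggers the background scan.
 */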

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	hci_pend_le_conns_clear(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* If privacy is required but a resolvable private address cannot
	 * be used, fall back to an unresolvable private address. This is
	 * useful for active scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
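
/* Usage sketch (illustrative): a request builder setting up LE scanning
 * would typically pick the own-address type first and only then append
 * the scan-parameter command ("cp" stands for a hypothetical
 * hci_cp_le_set_scan_param on the stack):
 *
 *	u8 own_addr_type;
 *
 *	err = hci_update_random_address(req, true, &own_addr_type);
 *	if (err < 0)
 *		return err;
 *
 *	cp.own_address_type = own_addr_type;
 */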

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage need to set
	 * the HCI_RAW flag to indicate that only the user channel is
	 * supported.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
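
/* Usage sketch (illustrative): a UART-style driver receiving a raw H4
 * byte stream, where each frame is prefixed by its packet-type byte, can
 * feed the data to the core in arbitrary chunks:
 *
 *	count = hci_recv_stream_fragment(hdev, buf, len);
 *	if (count < 0)
 *		BT_ERR("Frame reassembly failed (%d)", count);
 *
 * Drivers that already know the packet type of each chunk can call
 * hci_recv_fragment(hdev, type, buf, len) instead.
 */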
4229
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230/* ---- Interface to upper protocols ---- */
4231
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232int hci_register_cb(struct hci_cb *cb)
4233{
4234 BT_DBG("%p name %s", cb, cb->name);
4235
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004236 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004238 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
4240 return 0;
4241}
4242EXPORT_SYMBOL(hci_register_cb);
4243
4244int hci_unregister_cb(struct hci_cb *cb)
4245{
4246 BT_DBG("%p name %s", cb, cb->name);
4247
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004248 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004250 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251
4252 return 0;
4253}
4254EXPORT_SYMBOL(hci_unregister_cb);
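/* Illustrative sketch, not part of the original file: an upper layer
 * hooks into connection-level events by registering a struct hci_cb
 * (see hci_core.h); only the callbacks it cares about need to be set.
 * Names here are hypothetical:
 *
 *	static void example_security_cfm(struct hci_conn *conn, __u8 status,
 *					 __u8 encrypt)
 *	{
 *		BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status,
 *		       encrypt);
 *	}
 *
 *	static struct hci_cb example_cb = {
 *		.name		= "example",
 *		.security_cfm	= example_security_cfm,
 *	};
 *
 * followed by hci_register_cb(&example_cb) on init and
 * hci_unregister_cb(&example_cb) on exit.
 */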
4255
Marcel Holtmann51086992013-10-10 14:54:19 -07004256static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004258 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004260 /* Time stamp */
4261 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004263 /* Send copy to monitor */
4264 hci_send_to_monitor(hdev, skb);
4265
4266 if (atomic_read(&hdev->promisc)) {
4267 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004268 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269 }
4270
4271 /* Get rid of skb owner, prior to sending to the driver. */
4272 skb_orphan(skb);
4273
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004274 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004275 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276}
4277
Johan Hedberg3119ae92013-03-05 20:37:44 +02004278void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4279{
4280 skb_queue_head_init(&req->cmd_q);
4281 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004282 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004283}
4284
4285int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4286{
4287 struct hci_dev *hdev = req->hdev;
4288 struct sk_buff *skb;
4289 unsigned long flags;
4290
4291 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4292
Andre Guedes5d73e032013-03-08 11:20:16 -03004293	/* If an error occurred during request building, remove all HCI
4294 * commands queued on the HCI request queue.
4295 */
4296 if (req->err) {
4297 skb_queue_purge(&req->cmd_q);
4298 return req->err;
4299 }
4300
Johan Hedberg3119ae92013-03-05 20:37:44 +02004301 /* Do not allow empty requests */
4302 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004303 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004304
4305 skb = skb_peek_tail(&req->cmd_q);
4306 bt_cb(skb)->req.complete = complete;
4307
4308 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4309 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4310 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4311
4312 queue_work(hdev->workqueue, &hdev->cmd_work);
4313
4314 return 0;
4315}
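/* Illustrative sketch, not part of the original file: a caller batches
 * commands into one request and receives a single completion callback
 * for the whole batch (hci_req_add() is defined further below):
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_run(&req, example_complete);
 */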
4316
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004317static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004318 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319{
4320 int len = HCI_COMMAND_HDR_SIZE + plen;
4321 struct hci_command_hdr *hdr;
4322 struct sk_buff *skb;
4323
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004325 if (!skb)
4326 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327
4328 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004329 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 hdr->plen = plen;
4331
4332 if (plen)
4333 memcpy(skb_put(skb, plen), param, plen);
4334
4335 BT_DBG("skb len %d", skb->len);
4336
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004337 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004338
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004339 return skb;
4340}
4341
4342/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004343int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4344 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004345{
4346 struct sk_buff *skb;
4347
4348 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4349
4350 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4351 if (!skb) {
4352 BT_ERR("%s no memory for command", hdev->name);
4353 return -ENOMEM;
4354 }
4355
Johan Hedberg11714b32013-03-05 20:37:47 +02004356	/* Stand-alone HCI commands must be flagged as
4357 * single-command requests.
4358 */
4359 bt_cb(skb)->req.start = true;
4360
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004362 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363
4364 return 0;
4365}
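/* Illustrative sketch, not part of the original file: queueing a
 * stand-alone command. The parameters are copied into the skb, so a
 * stack variable is fine (the policy value 0x000f is just an example):
 *
 *	struct hci_cp_write_def_link_policy cp;
 *
 *	cp.policy = cpu_to_le16(0x000f);
 *	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
 */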
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366
Johan Hedberg71c76a12013-03-05 20:37:46 +02004367/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004368void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4369 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004370{
4371 struct hci_dev *hdev = req->hdev;
4372 struct sk_buff *skb;
4373
4374 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4375
Andre Guedes34739c12013-03-08 11:20:18 -03004376	/* If an error occurred during request building, there is no point in
4377 * queueing the HCI command. We can simply return.
4378 */
4379 if (req->err)
4380 return;
4381
Johan Hedberg71c76a12013-03-05 20:37:46 +02004382 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4383 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004384 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4385 hdev->name, opcode);
4386 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004387 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004388 }
4389
4390 if (skb_queue_empty(&req->cmd_q))
4391 bt_cb(skb)->req.start = true;
4392
Johan Hedberg02350a72013-04-03 21:50:29 +03004393 bt_cb(skb)->req.event = event;
4394
Johan Hedberg71c76a12013-03-05 20:37:46 +02004395 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004396}
4397
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004398void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4399 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004400{
4401 hci_req_add_ev(req, opcode, plen, param, 0);
4402}
4403
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004405void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406{
4407 struct hci_command_hdr *hdr;
4408
4409 if (!hdev->sent_cmd)
4410 return NULL;
4411
4412 hdr = (void *) hdev->sent_cmd->data;
4413
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004414 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415 return NULL;
4416
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004417 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418
4419 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4420}
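/* Illustrative sketch, not part of the original file: command complete
 * handlers in hci_event.c use this to recover what was sent, e.g.:
 *
 *	u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *
 *	if (!sent)
 *		return;
 *	BT_DBG("%s scan 0x%2.2x", hdev->name, *sent);
 */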
4421
4422/* Send ACL data */
4423static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4424{
4425 struct hci_acl_hdr *hdr;
4426 int len = skb->len;
4427
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004428 skb_push(skb, HCI_ACL_HDR_SIZE);
4429 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004430 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004431 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4432 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433}
4434
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004435static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004436 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004438 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439 struct hci_dev *hdev = conn->hdev;
4440 struct sk_buff *list;
4441
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004442 skb->len = skb_headlen(skb);
4443 skb->data_len = 0;
4444
4445 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004446
4447 switch (hdev->dev_type) {
4448 case HCI_BREDR:
4449 hci_add_acl_hdr(skb, conn->handle, flags);
4450 break;
4451 case HCI_AMP:
4452 hci_add_acl_hdr(skb, chan->handle, flags);
4453 break;
4454 default:
4455 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4456 return;
4457 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004458
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004459 list = skb_shinfo(skb)->frag_list;
4460 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461		/* Non-fragmented */
4462 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4463
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004464 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 } else {
4466 /* Fragmented */
4467 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4468
4469 skb_shinfo(skb)->frag_list = NULL;
4470
4471 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004472 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004474 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004475
4476 flags &= ~ACL_START;
4477 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 do {
4479 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004480
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004481 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004482 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483
4484 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4485
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004486 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487 } while (list);
4488
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004489 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004491}
4492
4493void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4494{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004495 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004496
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004497 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004498
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004499 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004501 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502}
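/* Illustrative sketch, not part of the original file: L2CAP submits
 * its outgoing PDUs through the per-connection HCI channel roughly
 * like this (conn here would be a struct l2cap_conn):
 *
 *	hci_send_acl(conn->hchan, skb, ACL_START_NO_FLUSH);
 */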
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503
4504/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004505void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506{
4507 struct hci_dev *hdev = conn->hdev;
4508 struct hci_sco_hdr hdr;
4509
4510 BT_DBG("%s len %d", hdev->name, skb->len);
4511
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004512 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 hdr.dlen = skb->len;
4514
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004515 skb_push(skb, HCI_SCO_HDR_SIZE);
4516 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004517 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004519 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004520
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004522 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524
4525/* ---- HCI TX task (outgoing data) ---- */
4526
4527/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004528static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4529 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530{
4531 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004532 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004533 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004535	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004537
4538 rcu_read_lock();
4539
4540 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004541 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004543
4544 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4545 continue;
4546
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547 num++;
4548
4549 if (c->sent < min) {
4550 min = c->sent;
4551 conn = c;
4552 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004553
4554 if (hci_conn_num(hdev, type) == num)
4555 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556 }
4557
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004558 rcu_read_unlock();
4559
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004561 int cnt, q;
4562
4563 switch (conn->type) {
4564 case ACL_LINK:
4565 cnt = hdev->acl_cnt;
4566 break;
4567 case SCO_LINK:
4568 case ESCO_LINK:
4569 cnt = hdev->sco_cnt;
4570 break;
4571 case LE_LINK:
4572 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4573 break;
4574 default:
4575 cnt = 0;
4576 BT_ERR("Unknown link type");
4577 }
4578
4579 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580 *quote = q ? q : 1;
4581 } else
4582 *quote = 0;
4583
4584 BT_DBG("conn %p quote %d", conn, *quote);
4585 return conn;
4586}
4587
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004588static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589{
4590 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004591 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592
Ville Tervobae1f5d92011-02-10 22:38:53 -03004593 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004595 rcu_read_lock();
4596
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004598 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004599 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004600 BT_ERR("%s killing stalled connection %pMR",
4601 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004602 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 }
4604 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004605
4606 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607}
4608
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004609static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4610 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004611{
4612 struct hci_conn_hash *h = &hdev->conn_hash;
4613 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004614 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004615 struct hci_conn *conn;
4616 int cnt, q, conn_num = 0;
4617
4618 BT_DBG("%s", hdev->name);
4619
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004620 rcu_read_lock();
4621
4622 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004623 struct hci_chan *tmp;
4624
4625 if (conn->type != type)
4626 continue;
4627
4628 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4629 continue;
4630
4631 conn_num++;
4632
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004633 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004634 struct sk_buff *skb;
4635
4636 if (skb_queue_empty(&tmp->data_q))
4637 continue;
4638
4639 skb = skb_peek(&tmp->data_q);
4640 if (skb->priority < cur_prio)
4641 continue;
4642
4643 if (skb->priority > cur_prio) {
4644 num = 0;
4645 min = ~0;
4646 cur_prio = skb->priority;
4647 }
4648
4649 num++;
4650
4651 if (conn->sent < min) {
4652 min = conn->sent;
4653 chan = tmp;
4654 }
4655 }
4656
4657 if (hci_conn_num(hdev, type) == conn_num)
4658 break;
4659 }
4660
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004661 rcu_read_unlock();
4662
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004663 if (!chan)
4664 return NULL;
4665
4666 switch (chan->conn->type) {
4667 case ACL_LINK:
4668 cnt = hdev->acl_cnt;
4669 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004670 case AMP_LINK:
4671 cnt = hdev->block_cnt;
4672 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004673 case SCO_LINK:
4674 case ESCO_LINK:
4675 cnt = hdev->sco_cnt;
4676 break;
4677 case LE_LINK:
4678 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4679 break;
4680 default:
4681 cnt = 0;
4682 BT_ERR("Unknown link type");
4683 }
4684
4685 q = cnt / num;
4686 *quote = q ? q : 1;
4687 BT_DBG("chan %p quote %d", chan, *quote);
4688 return chan;
4689}
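/* Worked example, not part of the original file: with three ACL
 * channels queueing priority 5 data on connections that have sent 2,
 * 0 and 1 packets respectively, the channel on the connection with
 * sent == 0 is picked; with hdev->acl_cnt == 9 its fair share is
 * *quote = 9 / 3 = 3 packets.
 */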
4690
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004691static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4692{
4693 struct hci_conn_hash *h = &hdev->conn_hash;
4694 struct hci_conn *conn;
4695 int num = 0;
4696
4697 BT_DBG("%s", hdev->name);
4698
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004699 rcu_read_lock();
4700
4701 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004702 struct hci_chan *chan;
4703
4704 if (conn->type != type)
4705 continue;
4706
4707 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4708 continue;
4709
4710 num++;
4711
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004712 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004713 struct sk_buff *skb;
4714
4715 if (chan->sent) {
4716 chan->sent = 0;
4717 continue;
4718 }
4719
4720 if (skb_queue_empty(&chan->data_q))
4721 continue;
4722
4723 skb = skb_peek(&chan->data_q);
4724 if (skb->priority >= HCI_PRIO_MAX - 1)
4725 continue;
4726
4727 skb->priority = HCI_PRIO_MAX - 1;
4728
4729 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004730 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004731 }
4732
4733 if (hci_conn_num(hdev, type) == num)
4734 break;
4735 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004736
4737 rcu_read_unlock();
4738
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004739}
4740
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004741static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4742{
4743 /* Calculate count of blocks used by this packet */
4744 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4745}
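/* Worked example, not part of the original file: with
 * hdev->block_len == 64, a 100 byte ACL skb (4 byte ACL header plus
 * 96 bytes of payload) needs DIV_ROUND_UP(100 - 4, 64) = 2 blocks.
 */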
4746
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004747static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004749 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750		/* ACL tx timeout must be longer than the maximum
4751		 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004752 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004753 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004754 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004756}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004758static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004759{
4760 unsigned int cnt = hdev->acl_cnt;
4761 struct hci_chan *chan;
4762 struct sk_buff *skb;
4763 int quote;
4764
4765 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004766
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004767 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004768 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004769 u32 priority = (skb_peek(&chan->data_q))->priority;
4770 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004771 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004772 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004773
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004774 /* Stop if priority has changed */
4775 if (skb->priority < priority)
4776 break;
4777
4778 skb = skb_dequeue(&chan->data_q);
4779
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004780 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004781 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004782
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004783 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784 hdev->acl_last_tx = jiffies;
4785
4786 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004787 chan->sent++;
4788 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789 }
4790 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004791
4792 if (cnt != hdev->acl_cnt)
4793 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004794}
4795
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004796static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004797{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004798 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004799 struct hci_chan *chan;
4800 struct sk_buff *skb;
4801 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004802 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004803
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004804 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004805
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004806 BT_DBG("%s", hdev->name);
4807
4808 if (hdev->dev_type == HCI_AMP)
4809 type = AMP_LINK;
4810 else
4811 type = ACL_LINK;
4812
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004813 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004814 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004815 u32 priority = (skb_peek(&chan->data_q))->priority;
4816 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4817 int blocks;
4818
4819 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004820 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004821
4822 /* Stop if priority has changed */
4823 if (skb->priority < priority)
4824 break;
4825
4826 skb = skb_dequeue(&chan->data_q);
4827
4828 blocks = __get_blocks(hdev, skb);
4829 if (blocks > hdev->block_cnt)
4830 return;
4831
4832 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004833 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004834
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004835 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004836 hdev->acl_last_tx = jiffies;
4837
4838 hdev->block_cnt -= blocks;
4839 quote -= blocks;
4840
4841 chan->sent += blocks;
4842 chan->conn->sent += blocks;
4843 }
4844 }
4845
4846 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004847 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004848}
4849
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004850static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004851{
4852 BT_DBG("%s", hdev->name);
4853
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004854 /* No ACL link over BR/EDR controller */
4855 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4856 return;
4857
4858 /* No AMP link over AMP controller */
4859 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004860 return;
4861
4862 switch (hdev->flow_ctl_mode) {
4863 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4864 hci_sched_acl_pkt(hdev);
4865 break;
4866
4867 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4868 hci_sched_acl_blk(hdev);
4869 break;
4870 }
4871}
4872
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004874static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875{
4876 struct hci_conn *conn;
4877 struct sk_buff *skb;
4878 int quote;
4879
4880 BT_DBG("%s", hdev->name);
4881
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004882 if (!hci_conn_num(hdev, SCO_LINK))
4883 return;
4884
Linus Torvalds1da177e2005-04-16 15:20:36 -07004885 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4886 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4887 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004888 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889
4890 conn->sent++;
4891 if (conn->sent == ~0)
4892 conn->sent = 0;
4893 }
4894 }
4895}
4896
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004897static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004898{
4899 struct hci_conn *conn;
4900 struct sk_buff *skb;
4901 int quote;
4902
4903 BT_DBG("%s", hdev->name);
4904
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004905 if (!hci_conn_num(hdev, ESCO_LINK))
4906 return;
4907
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004908 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4909 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004910 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4911 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004912 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004913
4914 conn->sent++;
4915 if (conn->sent == ~0)
4916 conn->sent = 0;
4917 }
4918 }
4919}
4920
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004921static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004922{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004923 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004924 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004925 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004926
4927 BT_DBG("%s", hdev->name);
4928
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004929 if (!hci_conn_num(hdev, LE_LINK))
4930 return;
4931
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004932 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004933		/* LE tx timeout must be longer than the maximum
4934		 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004935 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004936 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004937 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004938 }
4939
4940 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004941 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004942 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004943 u32 priority = (skb_peek(&chan->data_q))->priority;
4944 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004945 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004946 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004947
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004948 /* Stop if priority has changed */
4949 if (skb->priority < priority)
4950 break;
4951
4952 skb = skb_dequeue(&chan->data_q);
4953
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004954 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004955 hdev->le_last_tx = jiffies;
4956
4957 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004958 chan->sent++;
4959 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004960 }
4961 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004962
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004963 if (hdev->le_pkts)
4964 hdev->le_cnt = cnt;
4965 else
4966 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004967
4968 if (cnt != tmp)
4969 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004970}
4971
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004972static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004973{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004974 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 struct sk_buff *skb;
4976
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004977 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004978 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979
Marcel Holtmann52de5992013-09-03 18:08:38 -07004980 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4981 /* Schedule queues and send stuff to HCI driver */
4982 hci_sched_acl(hdev);
4983 hci_sched_sco(hdev);
4984 hci_sched_esco(hdev);
4985 hci_sched_le(hdev);
4986 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004987
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988 /* Send next queued raw (unknown type) packet */
4989 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004990 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991}
4992
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004993/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994
4995/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004996static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997{
4998 struct hci_acl_hdr *hdr = (void *) skb->data;
4999 struct hci_conn *conn;
5000 __u16 handle, flags;
5001
5002 skb_pull(skb, HCI_ACL_HDR_SIZE);
5003
5004 handle = __le16_to_cpu(hdr->handle);
5005 flags = hci_flags(handle);
5006 handle = hci_handle(handle);
5007
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005008 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005009 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010
5011 hdev->stat.acl_rx++;
5012
5013 hci_dev_lock(hdev);
5014 conn = hci_conn_hash_lookup_handle(hdev, handle);
5015 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005016
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005018 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005019
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005021 l2cap_recv_acldata(conn, skb, flags);
5022 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005024 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005025 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 }
5027
5028 kfree_skb(skb);
5029}
5030
5031/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005032static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033{
5034 struct hci_sco_hdr *hdr = (void *) skb->data;
5035 struct hci_conn *conn;
5036 __u16 handle;
5037
5038 skb_pull(skb, HCI_SCO_HDR_SIZE);
5039
5040 handle = __le16_to_cpu(hdr->handle);
5041
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005042 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043
5044 hdev->stat.sco_rx++;
5045
5046 hci_dev_lock(hdev);
5047 conn = hci_conn_hash_lookup_handle(hdev, handle);
5048 hci_dev_unlock(hdev);
5049
5050 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005052 sco_recv_scodata(conn, skb);
5053 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005055 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005056 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 }
5058
5059 kfree_skb(skb);
5060}
5061
Johan Hedberg9238f362013-03-05 20:37:48 +02005062static bool hci_req_is_complete(struct hci_dev *hdev)
5063{
5064 struct sk_buff *skb;
5065
5066 skb = skb_peek(&hdev->cmd_q);
5067 if (!skb)
5068 return true;
5069
5070 return bt_cb(skb)->req.start;
5071}
5072
Johan Hedberg42c6b122013-03-05 20:37:49 +02005073static void hci_resend_last(struct hci_dev *hdev)
5074{
5075 struct hci_command_hdr *sent;
5076 struct sk_buff *skb;
5077 u16 opcode;
5078
5079 if (!hdev->sent_cmd)
5080 return;
5081
5082 sent = (void *) hdev->sent_cmd->data;
5083 opcode = __le16_to_cpu(sent->opcode);
5084 if (opcode == HCI_OP_RESET)
5085 return;
5086
5087 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5088 if (!skb)
5089 return;
5090
5091 skb_queue_head(&hdev->cmd_q, skb);
5092 queue_work(hdev->workqueue, &hdev->cmd_work);
5093}
5094
Johan Hedberg9238f362013-03-05 20:37:48 +02005095void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5096{
5097 hci_req_complete_t req_complete = NULL;
5098 struct sk_buff *skb;
5099 unsigned long flags;
5100
5101 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5102
Johan Hedberg42c6b122013-03-05 20:37:49 +02005103 /* If the completed command doesn't match the last one that was
5104	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005105 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005106 if (!hci_sent_cmd_data(hdev, opcode)) {
5107		/* Some CSR-based controllers generate a spontaneous
5108 * reset complete event during init and any pending
5109 * command will never be completed. In such a case we
5110 * need to resend whatever was the last sent
5111 * command.
5112 */
5113 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5114 hci_resend_last(hdev);
5115
Johan Hedberg9238f362013-03-05 20:37:48 +02005116 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005117 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005118
5119	/* If the command succeeded and there are still more commands in
5120	 * this request, the request is not yet complete.
5121 */
5122 if (!status && !hci_req_is_complete(hdev))
5123 return;
5124
5125	/* If this was the last command in a request, the complete
5126 * callback would be found in hdev->sent_cmd instead of the
5127 * command queue (hdev->cmd_q).
5128 */
5129 if (hdev->sent_cmd) {
5130 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005131
5132 if (req_complete) {
5133 /* We must set the complete callback to NULL to
5134 * avoid calling the callback more than once if
5135 * this function gets called again.
5136 */
5137 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5138
Johan Hedberg9238f362013-03-05 20:37:48 +02005139 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005140 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005141 }
5142
5143 /* Remove all pending commands belonging to this request */
5144 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5145 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5146 if (bt_cb(skb)->req.start) {
5147 __skb_queue_head(&hdev->cmd_q, skb);
5148 break;
5149 }
5150
5151 req_complete = bt_cb(skb)->req.complete;
5152 kfree_skb(skb);
5153 }
5154 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5155
5156call_complete:
5157 if (req_complete)
5158 req_complete(hdev, status);
5159}
5160
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005161static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005163 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 struct sk_buff *skb;
5165
5166 BT_DBG("%s", hdev->name);
5167
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005169 /* Send copy to monitor */
5170 hci_send_to_monitor(hdev, skb);
5171
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172 if (atomic_read(&hdev->promisc)) {
5173 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005174 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175 }
5176
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005177 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 kfree_skb(skb);
5179 continue;
5180 }
5181
5182 if (test_bit(HCI_INIT, &hdev->flags)) {
5183			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005184 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 case HCI_ACLDATA_PKT:
5186 case HCI_SCODATA_PKT:
5187 kfree_skb(skb);
5188 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 }
5191
5192 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005193 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005195 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196 hci_event_packet(hdev, skb);
5197 break;
5198
5199 case HCI_ACLDATA_PKT:
5200 BT_DBG("%s ACL data packet", hdev->name);
5201 hci_acldata_packet(hdev, skb);
5202 break;
5203
5204 case HCI_SCODATA_PKT:
5205 BT_DBG("%s SCO data packet", hdev->name);
5206 hci_scodata_packet(hdev, skb);
5207 break;
5208
5209 default:
5210 kfree_skb(skb);
5211 break;
5212 }
5213 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214}
5215
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005216static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005218 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219 struct sk_buff *skb;
5220
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005221 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5222 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005225 if (atomic_read(&hdev->cmd_cnt)) {
5226 skb = skb_dequeue(&hdev->cmd_q);
5227 if (!skb)
5228 return;
5229
Wei Yongjun7585b972009-02-25 18:29:52 +08005230 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005232 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005233 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005235 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005236 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005237 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005238 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005239 schedule_delayed_work(&hdev->cmd_timer,
5240 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 } else {
5242 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005243 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 }
5245 }
5246}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005247
5248void hci_req_add_le_scan_disable(struct hci_request *req)
5249{
5250 struct hci_cp_le_set_scan_enable cp;
5251
5252 memset(&cp, 0, sizeof(cp));
5253 cp.enable = LE_SCAN_DISABLE;
5254 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5255}
Andre Guedesa4790db2014-02-26 20:21:47 -03005256
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005257void hci_req_add_le_passive_scan(struct hci_request *req)
5258{
5259 struct hci_cp_le_set_scan_param param_cp;
5260 struct hci_cp_le_set_scan_enable enable_cp;
5261 struct hci_dev *hdev = req->hdev;
5262 u8 own_addr_type;
5263
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005264	/* Set require_privacy to false since no SCAN_REQ is sent
5265 * during passive scanning. Not using an unresolvable address
5266 * here is important so that peer devices using direct
5267 * advertising with our address will be correctly reported
5268 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005269 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005270 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005271 return;
5272
5273 memset(&param_cp, 0, sizeof(param_cp));
5274 param_cp.type = LE_SCAN_PASSIVE;
5275 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5276 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5277 param_cp.own_address_type = own_addr_type;
5278 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5279 &param_cp);
5280
5281 memset(&enable_cp, 0, sizeof(enable_cp));
5282 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005283 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005284 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5285 &enable_cp);
5286}
5287
Andre Guedesa4790db2014-02-26 20:21:47 -03005288static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5289{
5290 if (status)
5291 BT_DBG("HCI request failed to update background scanning: "
5292 "status 0x%2.2x", status);
5293}
5294
5295/* This function controls the background scanning based on hdev->pend_le_conns
5296 * list. If there are pending LE connections, we start the background scanning,
5297 * otherwise we stop it.
5298 *
5299 * This function requires the caller to hold hdev->lock.
5300 */
5301void hci_update_background_scan(struct hci_dev *hdev)
5302{
Andre Guedesa4790db2014-02-26 20:21:47 -03005303 struct hci_request req;
5304 struct hci_conn *conn;
5305 int err;
5306
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005307 if (!test_bit(HCI_UP, &hdev->flags) ||
5308 test_bit(HCI_INIT, &hdev->flags) ||
5309 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005310 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005311 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005312 return;
5313
Andre Guedesa4790db2014-02-26 20:21:47 -03005314 hci_req_init(&req, hdev);
5315
5316 if (list_empty(&hdev->pend_le_conns)) {
5317		/* If there are no pending LE connections, we should stop
5318 * the background scanning.
5319 */
5320
5321 /* If controller is not scanning we are done. */
5322 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5323 return;
5324
5325 hci_req_add_le_scan_disable(&req);
5326
5327 BT_DBG("%s stopping background scanning", hdev->name);
5328 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005329 /* If there is at least one pending LE connection, we should
5330 * keep the background scan running.
5331 */
5332
Andre Guedesa4790db2014-02-26 20:21:47 -03005333 /* If controller is connecting, we should not start scanning
5334 * since some controllers are not able to scan and connect at
5335 * the same time.
5336 */
5337 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5338 if (conn)
5339 return;
5340
Andre Guedes4340a122014-03-10 18:26:24 -03005341 /* If controller is currently scanning, we stop it to ensure we
5342 * don't miss any advertising (due to duplicates filter).
5343 */
5344 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5345 hci_req_add_le_scan_disable(&req);
5346
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005347 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005348
5349 BT_DBG("%s starting background scanning", hdev->name);
5350 }
5351
5352 err = hci_req_run(&req, update_background_scan_complete);
5353 if (err)
5354 BT_ERR("Failed to run HCI request: err %d", err);
5355}