/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

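/* Expose the Device Under Test (DUT) mode flag as a debugfs attribute.
 * Reading returns 'Y' or 'N'; writing a boolean string toggles the mode
 * by synchronously sending HCI_OP_ENABLE_DUT_MODE (enable) or
 * HCI_OP_RESET (disable) to the controller.
 */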
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

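/* The read-only entries below all follow the same seq_file pattern: a
 * *_show() callback prints the state under hci_dev_lock(), a *_open()
 * wrapper binds it through single_open(), and a file_operations table
 * wires it into debugfs.
 */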
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

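/* Debug switch to force Secure Connections support even when the
 * controller does not claim it. Toggling is only allowed while the
 * device is down, hence the -EBUSY check against HCI_UP below.
 */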
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

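/* Sniff intervals are expressed in baseband slots (0.625 ms each) and,
 * per the HCI specification, only even values are valid; the setters
 * below also keep min <= max consistent.
 */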
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

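/* Dump the current identity: the identity address (public or static
 * random) as resolved by hci_copy_identity_address(), followed by the
 * local IRK and the current RPA.
 */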
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

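/* LE connection intervals are in units of 1.25 ms; the valid range
 * 0x0006-0x0c80 enforced below corresponds to 7.5 ms through 4 s.
 */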
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

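/* Synchronous requests are tracked through hdev->req_status: the
 * submitter sets HCI_REQ_PEND and sleeps on hdev->req_wait_q, and the
 * completion (or cancellation) handler below flips the status and wakes
 * the waiter up.
 */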
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

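/* For reference, a typical __hci_cmd_sync() caller (see dut_mode_write()
 * above) looks roughly like this, with hci_req_lock() held around the
 * call:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */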
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

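/* The event mask is an 8-byte bitfield sent with HCI_OP_SET_EVENT_MASK;
 * each events[n] |= 0xXX line below enables one event bit within byte n,
 * on top of a baseline chosen for BR/EDR or LE-only controllers.
 */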
Johan Hedberg42c6b122013-03-05 20:37:49 +02001368static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001369{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001370 struct hci_dev *hdev = req->hdev;
1371
Johan Hedberg2177bab2013-03-05 20:37:43 +02001372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1375 */
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1377
1378 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1380 */
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382 return;
1383
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001390 } else {
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001402 }
1403
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1406
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1409
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1412
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1415
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1418
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1421
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1433 */
1434 }
1435
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1438
Johan Hedberg42c6b122013-03-05 20:37:49 +02001439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001440}
1441
Johan Hedberg42c6b122013-03-05 20:37:49 +02001442static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001443{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001444 struct hci_dev *hdev = req->hdev;
1445
Johan Hedberg2177bab2013-03-05 20:37:43 +02001446 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001447 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001450
1451 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001452 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001453
Johan Hedberg42c6b122013-03-05 20:37:49 +02001454 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001455
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1458 */
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001461
1462 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1468 */
1469 hdev->max_page = 0x01;
1470
Johan Hedberg2177bab2013-03-05 20:37:43 +02001471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001475 } else {
1476 struct hci_cp_write_eir cp;
1477
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1480
Johan Hedberg42c6b122013-03-05 20:37:49 +02001481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001482 }
1483 }
1484
1485 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001487
1488 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001490
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1493
1494 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001497 }
1498
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001503 }
1504}
1505
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001507{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1511
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1520
1521 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523}
1524
Johan Hedberg42c6b122013-03-05 20:37:49 +02001525static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001526{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528 struct hci_cp_write_le_host_supported cp;
1529
Johan Hedbergc73eee92013-04-19 18:35:21 +03001530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1533
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534 memset(&cp, 0, sizeof(cp));
1535
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1539 }
1540
1541 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544}
1545
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001546static void hci_set_event_mask_page_2(struct hci_request *req)
1547{
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1553 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001554 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1559 }
1560
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1563 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001564 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1569 }
1570
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1574
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576}
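/* Illustrative note (added annotation): on a controller that is CSB
 * master capable (but not CSB slave capable) and supports ping, the
 * mask built above would be
 * { 0x00, 0xc0, 0xb0, 0x00, 0x00, 0x00, 0x00, 0x00 }.
 */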
1577
Johan Hedberg42c6b122013-03-05 20:37:49 +02001578static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001579{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001580 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001581 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001582
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1586 *
 1587 * Check the supported commands and only if the command is marked
 1588 * as supported send it. If not supported, assume that the controller
 1589 * does not have actual support for stored link keys, which makes this
1590 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001591 *
 1592 * Some controllers indicate that they support deleting stored
 1593 * link keys, but they actually don't. The quirk lets a driver
 1594 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001595 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001598 struct hci_cp_delete_stored_link_key cp;
1599
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603 sizeof(cp), &cp);
1604 }
1605
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001608
Andre Guedes9193c6e2014-07-01 18:10:09 -03001609 if (lmp_le_capable(hdev)) {
1610 u8 events[8];
1611
1612 memset(events, 0, sizeof(events));
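		/* The value 0x1f below enables the five default LE meta
		 * events: LE Connection Complete, LE Advertising Report,
		 * LE Connection Update Complete, LE Read Remote Used
		 * Features Complete and LE Long Term Key Request.
		 */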
1613 events[0] = 0x1f;
Andre Guedes662bc2e2014-07-01 18:10:10 -03001614
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1617 */
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1620 * Parameter Request
1621 */
1622
Andre Guedes9193c6e2014-07-01 18:10:09 -03001623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624 events);
1625
Johan Hedberg42c6b122013-03-05 20:37:49 +02001626 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001627 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001628
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1632
1633 cp.page = p;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635 sizeof(cp), &cp);
1636 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001637}
1638
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001639static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640{
1641 struct hci_dev *hdev = req->hdev;
1642
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1646
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001647 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001648 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001650
1651 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001652 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
1658 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001659}
1660
Johan Hedberg2177bab2013-03-05 20:37:43 +02001661static int __hci_init(struct hci_dev *hdev)
1662{
1663 int err;
1664
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1668
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1671 */
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1675 }
1676
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677 /* The HCI_BREDR device type covers single-mode LE, single-mode
 1678 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
 1679 * only need the first stage init.
1680 */
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1683
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1687
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1691
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1695
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1698 */
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1701
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1716
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001726 }
1727
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001728 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001735 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001736
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1744 }
1745
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001746 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1755
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1759 */
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1764
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001789 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001790
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001791 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001792}
1793
Johan Hedberg42c6b122013-03-05 20:37:49 +02001794static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795{
1796 __u8 scan = opt;
1797
Johan Hedberg42c6b122013-03-05 20:37:49 +02001798 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
1800 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001801 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
1803
Johan Hedberg42c6b122013-03-05 20:37:49 +02001804static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805{
1806 __u8 auth = opt;
1807
Johan Hedberg42c6b122013-03-05 20:37:49 +02001808 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
1810 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001811 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812}
1813
Johan Hedberg42c6b122013-03-05 20:37:49 +02001814static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815{
1816 __u8 encrypt = opt;
1817
Johan Hedberg42c6b122013-03-05 20:37:49 +02001818 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001820 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001821 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822}
1823
Johan Hedberg42c6b122013-03-05 20:37:49 +02001824static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001825{
1826 __le16 policy = cpu_to_le16(opt);
1827
Johan Hedberg42c6b122013-03-05 20:37:49 +02001828 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001829
1830 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001831 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001832}
1833
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001834/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 * Device is held on return. */
1836struct hci_dev *hci_dev_get(int index)
1837{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001838 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
1840 BT_DBG("%d", index);
1841
1842 if (index < 0)
1843 return NULL;
1844
1845 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001846 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 if (d->id == index) {
1848 hdev = hci_dev_hold(d);
1849 break;
1850 }
1851 }
1852 read_unlock(&hci_dev_list_lock);
1853 return hdev;
1854}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855
1856/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001857
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001858bool hci_discovery_active(struct hci_dev *hdev)
1859{
1860 struct discovery_state *discov = &hdev->discovery;
1861
Andre Guedes6fbe1952012-02-03 17:47:58 -03001862 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001863 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001864 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001865 return true;
1866
Andre Guedes6fbe1952012-02-03 17:47:58 -03001867 default:
1868 return false;
1869 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001870}
1871
Johan Hedbergff9ef572012-01-04 14:23:45 +02001872void hci_discovery_set_state(struct hci_dev *hdev, int state)
1873{
1874 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1875
1876 if (hdev->discovery.state == state)
1877 return;
1878
1879 switch (state) {
1880 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001881 hci_update_background_scan(hdev);
1882
Andre Guedes7b99b652012-02-13 15:41:02 -03001883 if (hdev->discovery.state != DISCOVERY_STARTING)
1884 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001885 break;
1886 case DISCOVERY_STARTING:
1887 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001888 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001889 mgmt_discovering(hdev, 1);
1890 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001891 case DISCOVERY_RESOLVING:
1892 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001893 case DISCOVERY_STOPPING:
1894 break;
1895 }
1896
1897 hdev->discovery.state = state;
1898}
1899
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001900void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901{
Johan Hedberg30883512012-01-04 14:16:21 +02001902 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001903 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
Johan Hedberg561aafb2012-01-04 13:31:59 +02001905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001907 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001909
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912}
1913
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001914struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
Johan Hedberg30883512012-01-04 14:16:21 +02001917 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 struct inquiry_entry *e;
1919
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001920 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
Johan Hedberg561aafb2012-01-04 13:31:59 +02001922 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001924 return e;
1925 }
1926
1927 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928}
1929
Johan Hedberg561aafb2012-01-04 13:31:59 +02001930struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001931 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001932{
Johan Hedberg30883512012-01-04 14:16:21 +02001933 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001934 struct inquiry_entry *e;
1935
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001936 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001937
1938 list_for_each_entry(e, &cache->unknown, list) {
1939 if (!bacmp(&e->data.bdaddr, bdaddr))
1940 return e;
1941 }
1942
1943 return NULL;
1944}
1945
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001946struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001947 bdaddr_t *bdaddr,
1948 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001949{
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1952
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001954
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1960 }
1961
1962 return NULL;
1963}
1964
Johan Hedberga3d4e202012-01-09 00:53:02 +02001965void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001966 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001967{
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1971
1972 list_del(&ie->list);
1973
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001976 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001977 break;
1978 pos = &p->list;
1979 }
1980
1981 list_add(&ie->list, pos);
1982}
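/* Illustrative note (added annotation): the insertion above keeps the
 * resolve list sorted by ascending |RSSI|, so the names of devices with
 * the strongest signal are resolved first; entries already in
 * NAME_PENDING state keep their position.
 */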
1983
Marcel Holtmannaf589252014-07-01 14:11:20 +02001984u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986{
Johan Hedberg30883512012-01-04 14:16:21 +02001987 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001988 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02001989 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Szymon Janc2b2fec42012-11-20 11:38:54 +01001993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
Marcel Holtmannaf589252014-07-01 14:11:20 +02001995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001997
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001999 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002002
Johan Hedberga3d4e202012-01-09 00:53:02 +02002003 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002004 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2007 }
2008
Johan Hedberg561aafb2012-01-04 13:31:59 +02002009 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002010 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002011
Johan Hedberg561aafb2012-01-04 13:31:59 +02002012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2017 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002018
2019 list_add(&ie->all, &cache->all);
2020
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2026 }
2027
2028update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002030 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 }
2034
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002038
2039 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002041
Marcel Holtmannaf589252014-07-01 14:11:20 +02002042done:
2043 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044}
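/* Illustrative note (added annotation): the returned flags drive the
 * mgmt Device Found event: MGMT_DEV_FOUND_CONFIRM_NAME is set while the
 * remote name is still unknown (or no cache entry could be allocated)
 * and MGMT_DEV_FOUND_LEGACY_PAIRING when the device did not indicate
 * SSP support.
 */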
2045
2046static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047{
Johan Hedberg30883512012-01-04 14:16:21 +02002048 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2052
Johan Hedberg561aafb2012-01-04 13:31:59 +02002053 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002055
2056 if (copied >= num)
2057 break;
2058
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002067 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 }
2069
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2072}
2073
Johan Hedberg42c6b122013-03-05 20:37:49 +02002074static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075{
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002077 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 struct hci_cp_inquiry cp;
2079
2080 BT_DBG("%s", hdev->name);
2081
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2084
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090}
2091
Andre Guedes3e13fa12013-03-27 20:04:56 -03002092static int wait_inquiry(void *word)
2093{
2094 schedule();
2095 return signal_pending(current);
2096}
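/* Illustrative note (added annotation): wait_inquiry() is the
 * wait_on_bit() action used in hci_inquiry() below; it reschedules and
 * reports whether a pending signal should abort the wait with -EINTR.
 */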
2097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098int hci_inquiry(void __user *arg)
2099{
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2106
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2109
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 return -ENODEV;
2113
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2117 }
2118
Marcel Holtmann4a964402014-07-02 19:10:33 +02002119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2127 }
2128
Johan Hedberg56f87902013-10-02 13:43:13 +03002129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2132 }
2133
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002134 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002137 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 do_inquiry = 1;
2139 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002140 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
Marcel Holtmann04837f62006-07-03 10:02:33 +02002142 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002143
2144 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002147 if (err < 0)
2148 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002149
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2152 */
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002158 /* For an unlimited number of responses, use a buffer with
 2159 * 255 entries
2160 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
 2163 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2164 * and then copy it to user space.
2165 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002167 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 err = -ENOMEM;
2169 goto done;
2170 }
2171
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002172 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002174 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002181 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002183 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 err = -EFAULT;
2185
2186 kfree(buf);
2187
2188done:
2189 hci_dev_put(hdev);
2190 return err;
2191}
2192
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002193static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 int ret = 0;
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 BT_DBG("%s %p", hdev->name, hdev);
2198
2199 hci_req_lock(hdev);
2200
Johan Hovold94324962012-03-15 14:48:41 +01002201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202 ret = -ENODEV;
2203 goto done;
2204 }
2205
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2209 */
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211 ret = -ERFKILL;
2212 goto done;
2213 }
2214
2215 /* Check for valid public address or a configured static
 2216 * random address, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2218 * or not.
2219 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002220 * In case of user channel usage, it is not important
2221 * if a public address or static random address is
2222 * available.
2223 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2226 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
2232 goto done;
2233 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002234 }
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 if (test_bit(HCI_UP, &hdev->flags)) {
2237 ret = -EALREADY;
2238 goto done;
2239 }
2240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 if (hdev->open(hdev)) {
2242 ret = -EIO;
2243 goto done;
2244 }
2245
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
2248
2249 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250 ret = hdev->setup(hdev);
2251
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002252 /* If public address change is configured, ensure that the
2253 * address gets programmed. If the driver does not support
2254 * changing the public address, fail the power on procedure.
2255 */
2256 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257 if (hdev->set_bdaddr)
2258 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2259 else
2260 ret = -EADDRNOTAVAIL;
2261 }
2262
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002263 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002264 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002266 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 }
2268
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002269 clear_bit(HCI_INIT, &hdev->flags);
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 if (!ret) {
2272 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002273 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 set_bit(HCI_UP, &hdev->flags);
2275 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002276 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002277 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002278 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002279 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002280 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002281 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002282 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002283 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002284 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002286 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002287 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002288 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
2290 skb_queue_purge(&hdev->cmd_q);
2291 skb_queue_purge(&hdev->rx_q);
2292
2293 if (hdev->flush)
2294 hdev->flush(hdev);
2295
2296 if (hdev->sent_cmd) {
2297 kfree_skb(hdev->sent_cmd);
2298 hdev->sent_cmd = NULL;
2299 }
2300
2301 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002302 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 }
2304
2305done:
2306 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 return ret;
2308}
2309
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002310/* ---- HCI ioctl helpers ---- */
2311
2312int hci_dev_open(__u16 dev)
2313{
2314 struct hci_dev *hdev;
2315 int err;
2316
2317 hdev = hci_dev_get(dev);
2318 if (!hdev)
2319 return -ENODEV;
2320
Marcel Holtmann4a964402014-07-02 19:10:33 +02002321 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002322 * up as user channel. Trying to bring them up as normal devices
 2323 * will result in a failure. Only user channel operation is
2324 * possible.
2325 *
2326 * When this function is called for a user channel, the flag
2327 * HCI_USER_CHANNEL will be set first before attempting to
2328 * open the device.
2329 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002330 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002331 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2332 err = -EOPNOTSUPP;
2333 goto done;
2334 }
2335
Johan Hedberge1d08f42013-10-01 22:44:50 +03002336 /* We need to ensure that no other power on/off work is pending
2337 * before proceeding to call hci_dev_do_open. This is
2338 * particularly important if the setup procedure has not yet
2339 * completed.
2340 */
2341 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2342 cancel_delayed_work(&hdev->power_off);
2343
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002344 /* After this call it is guaranteed that the setup procedure
2345 * has finished. This means that error conditions like RFKILL
2346 * or no valid public or static random address apply.
2347 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002348 flush_workqueue(hdev->req_workqueue);
2349
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002350 err = hci_dev_do_open(hdev);
2351
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002352done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002353 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002354 return err;
2355}
2356
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357static int hci_dev_do_close(struct hci_dev *hdev)
2358{
2359 BT_DBG("%s %p", hdev->name, hdev);
2360
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002361 cancel_delayed_work(&hdev->power_off);
2362
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 hci_req_cancel(hdev, ENODEV);
2364 hci_req_lock(hdev);
2365
2366 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002367 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 hci_req_unlock(hdev);
2369 return 0;
2370 }
2371
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002372 /* Flush RX and TX works */
2373 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002374 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002376 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002377 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002378 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002379 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002380 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002381 }
2382
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002383 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002384 cancel_delayed_work(&hdev->service_cache);
2385
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002386 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002387
2388 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2389 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002390
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002391 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002392 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002394 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002395 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
2397 hci_notify(hdev, HCI_DEV_DOWN);
2398
2399 if (hdev->flush)
2400 hdev->flush(hdev);
2401
2402 /* Reset device */
2403 skb_queue_purge(&hdev->cmd_q);
2404 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002405 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2406 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002407 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002409 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 clear_bit(HCI_INIT, &hdev->flags);
2411 }
2412
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002413 /* flush cmd work */
2414 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415
2416 /* Drop queues */
2417 skb_queue_purge(&hdev->rx_q);
2418 skb_queue_purge(&hdev->cmd_q);
2419 skb_queue_purge(&hdev->raw_q);
2420
2421 /* Drop last sent command */
2422 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002423 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 kfree_skb(hdev->sent_cmd);
2425 hdev->sent_cmd = NULL;
2426 }
2427
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002428 kfree_skb(hdev->recv_evt);
2429 hdev->recv_evt = NULL;
2430
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 /* After this point our queues are empty
2432 * and no tasks are scheduled. */
2433 hdev->close(hdev);
2434
Johan Hedberg35b973c2013-03-15 17:06:59 -05002435 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002436 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002437 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2438
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002439 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2440 if (hdev->dev_type == HCI_BREDR) {
2441 hci_dev_lock(hdev);
2442 mgmt_powered(hdev, 0);
2443 hci_dev_unlock(hdev);
2444 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002445 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002446
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002447 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002448 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002449
Johan Hedberge59fda82012-02-22 18:11:53 +02002450 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002451 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002452 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 hci_req_unlock(hdev);
2455
2456 hci_dev_put(hdev);
2457 return 0;
2458}
2459
2460int hci_dev_close(__u16 dev)
2461{
2462 struct hci_dev *hdev;
2463 int err;
2464
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002465 hdev = hci_dev_get(dev);
2466 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002468
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002469 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2470 err = -EBUSY;
2471 goto done;
2472 }
2473
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002474 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2475 cancel_delayed_work(&hdev->power_off);
2476
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002478
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002479done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 hci_dev_put(hdev);
2481 return err;
2482}
2483
2484int hci_dev_reset(__u16 dev)
2485{
2486 struct hci_dev *hdev;
2487 int ret = 0;
2488
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002489 hdev = hci_dev_get(dev);
2490 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 return -ENODEV;
2492
2493 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
Marcel Holtmann808a0492013-08-26 20:57:58 -07002495 if (!test_bit(HCI_UP, &hdev->flags)) {
2496 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002498 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002500 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501 ret = -EBUSY;
2502 goto done;
2503 }
2504
Marcel Holtmann4a964402014-07-02 19:10:33 +02002505 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002506 ret = -EOPNOTSUPP;
2507 goto done;
2508 }
2509
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 /* Drop queues */
2511 skb_queue_purge(&hdev->rx_q);
2512 skb_queue_purge(&hdev->cmd_q);
2513
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002514 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002515 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002517 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
2519 if (hdev->flush)
2520 hdev->flush(hdev);
2521
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002522 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002523 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002525 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526
2527done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 hci_req_unlock(hdev);
2529 hci_dev_put(hdev);
2530 return ret;
2531}
2532
2533int hci_dev_reset_stat(__u16 dev)
2534{
2535 struct hci_dev *hdev;
2536 int ret = 0;
2537
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002538 hdev = hci_dev_get(dev);
2539 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 return -ENODEV;
2541
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002542 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2543 ret = -EBUSY;
2544 goto done;
2545 }
2546
Marcel Holtmann4a964402014-07-02 19:10:33 +02002547 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002548 ret = -EOPNOTSUPP;
2549 goto done;
2550 }
2551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2553
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002554done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 return ret;
2557}
2558
2559int hci_dev_cmd(unsigned int cmd, void __user *arg)
2560{
2561 struct hci_dev *hdev;
2562 struct hci_dev_req dr;
2563 int err = 0;
2564
2565 if (copy_from_user(&dr, arg, sizeof(dr)))
2566 return -EFAULT;
2567
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002568 hdev = hci_dev_get(dr.dev_id);
2569 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 return -ENODEV;
2571
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002572 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 err = -EBUSY;
2574 goto done;
2575 }
2576
Marcel Holtmann4a964402014-07-02 19:10:33 +02002577 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002578 err = -EOPNOTSUPP;
2579 goto done;
2580 }
2581
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002582 if (hdev->dev_type != HCI_BREDR) {
2583 err = -EOPNOTSUPP;
2584 goto done;
2585 }
2586
Johan Hedberg56f87902013-10-02 13:43:13 +03002587 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2588 err = -EOPNOTSUPP;
2589 goto done;
2590 }
2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 switch (cmd) {
2593 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002594 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 break;
2597
2598 case HCISETENCRYPT:
2599 if (!lmp_encrypt_capable(hdev)) {
2600 err = -EOPNOTSUPP;
2601 break;
2602 }
2603
2604 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2605 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002606 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2607 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 if (err)
2609 break;
2610 }
2611
Johan Hedberg01178cd2013-03-05 20:37:41 +02002612 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2613 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 break;
2615
2616 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002617 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2618 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 break;
2620
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002621 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002622 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2623 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002624 break;
2625
2626 case HCISETLINKMODE:
2627 hdev->link_mode = ((__u16) dr.dev_opt) &
2628 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2629 break;
2630
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 case HCISETPTYPE:
2632 hdev->pkt_type = (__u16) dr.dev_opt;
2633 break;
2634
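	/* Illustrative note (added annotation) for the two MTU ioctls
	 * below: dev_opt packs two 16-bit values; on a little-endian
	 * host the upper half carries the MTU and the lower half the
	 * packet count.
	 */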
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002636 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2637 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 break;
2639
2640 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002641 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2642 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 break;
2644
2645 default:
2646 err = -EINVAL;
2647 break;
2648 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002649
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002650done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 hci_dev_put(hdev);
2652 return err;
2653}
2654
2655int hci_get_dev_list(void __user *arg)
2656{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002657 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 struct hci_dev_list_req *dl;
2659 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 int n = 0, size, err;
2661 __u16 dev_num;
2662
2663 if (get_user(dev_num, (__u16 __user *) arg))
2664 return -EFAULT;
2665
2666 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2667 return -EINVAL;
2668
2669 size = sizeof(*dl) + dev_num * sizeof(*dr);
2670
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002671 dl = kzalloc(size, GFP_KERNEL);
2672 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 return -ENOMEM;
2674
2675 dr = dl->dev_req;
2676
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002677 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002678 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002679 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002680 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002681
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002682 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2683 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002684
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 (dr + n)->dev_id = hdev->id;
2686 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002687
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 if (++n >= dev_num)
2689 break;
2690 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002691 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692
2693 dl->dev_num = n;
2694 size = sizeof(*dl) + n * sizeof(*dr);
2695
2696 err = copy_to_user(arg, dl, size);
2697 kfree(dl);
2698
2699 return err ? -EFAULT : 0;
2700}
2701
2702int hci_get_dev_info(void __user *arg)
2703{
2704 struct hci_dev *hdev;
2705 struct hci_dev_info di;
2706 int err = 0;
2707
2708 if (copy_from_user(&di, arg, sizeof(di)))
2709 return -EFAULT;
2710
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002711 hdev = hci_dev_get(di.dev_id);
2712 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 return -ENODEV;
2714
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002715 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002716 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002717
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002718 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2719 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002720
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 strcpy(di.name, hdev->name);
2722 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002723 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 di.flags = hdev->flags;
2725 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002726 if (lmp_bredr_capable(hdev)) {
2727 di.acl_mtu = hdev->acl_mtu;
2728 di.acl_pkts = hdev->acl_pkts;
2729 di.sco_mtu = hdev->sco_mtu;
2730 di.sco_pkts = hdev->sco_pkts;
2731 } else {
2732 di.acl_mtu = hdev->le_mtu;
2733 di.acl_pkts = hdev->le_pkts;
2734 di.sco_mtu = 0;
2735 di.sco_pkts = 0;
2736 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 di.link_policy = hdev->link_policy;
2738 di.link_mode = hdev->link_mode;
2739
2740 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2741 memcpy(&di.features, &hdev->features, sizeof(di.features));
2742
2743 if (copy_to_user(arg, &di, sizeof(di)))
2744 err = -EFAULT;
2745
2746 hci_dev_put(hdev);
2747
2748 return err;
2749}
2750
2751/* ---- Interface to HCI drivers ---- */
2752
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002753static int hci_rfkill_set_block(void *data, bool blocked)
2754{
2755 struct hci_dev *hdev = data;
2756
2757 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2758
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002759 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2760 return -EBUSY;
2761
Johan Hedberg5e130362013-09-13 08:58:17 +03002762 if (blocked) {
2763 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002764 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2765 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002766 } else {
2767 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002768 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002769
2770 return 0;
2771}
2772
2773static const struct rfkill_ops hci_rfkill_ops = {
2774 .set_block = hci_rfkill_set_block,
2775};
2776
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002777static void hci_power_on(struct work_struct *work)
2778{
2779 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002780 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002781
2782 BT_DBG("%s", hdev->name);
2783
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002784 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002785 if (err < 0) {
2786 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002787 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002788 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002789
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002790 /* During the HCI setup phase, a few error conditions are
2791 * ignored and they need to be checked now. If they are still
2792 * valid, it is important to turn the device back off.
2793 */
2794 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002795 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002796 (hdev->dev_type == HCI_BREDR &&
2797 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2798 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002799 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2800 hci_dev_do_close(hdev);
2801 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002802 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2803 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002804 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002805
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002806 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002807 /* For unconfigured devices, set the HCI_RAW flag
2808 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002809 */
2810 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2811 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002812
2813 /* For fully configured devices, this will send
2814 * the Index Added event. For unconfigured devices,
 2815 * it will send the Unconfigured Index Added event.
2816 *
2817 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 2818 * and no event will be sent.
2819 */
2820 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002821 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002822}
2823
2824static void hci_power_off(struct work_struct *work)
2825{
Johan Hedberg32435532011-11-07 22:16:04 +02002826 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002827 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002828
2829 BT_DBG("%s", hdev->name);
2830
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002831 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002832}
2833
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

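/* Worked example of the decision above (illustrative values only, not
 * taken from the specification text): a Changed Combination Key seen
 * with old_key_type == 0xff means legacy pairing produced a "changed"
 * key although no previous key exists, so it is not stored. The same
 * key type on a connection where both sides requested bonding
 * (auth_type and remote_auth > 0x01) passes the later checks:
 *
 *	hci_persistent_key(hdev, conn, HCI_LK_CHANGED_COMBINATION, 0xff);
 *		-> false
 *	hci_persistent_key(hdev, conn, HCI_LK_CHANGED_COMBINATION, 0x00);
 *		-> true, assuming conn->auth_type > 0x01 and
 *		   conn->remote_auth > 0x01
 */
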
static bool ltk_type_master(u8 type)
{
	return (type == SMP_LTK);
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

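/* Informal reading of the lookup above: the EDiv/Rand pair is what the
 * LE Long Term Key Request event carries when the remote side starts
 * encryption, and the extra master flag keeps a key distributed for
 * one connection role from being matched when the stack is acting in
 * the other role.
 */
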
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

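/* The lookup above is two-pass on purpose: the first loop is a cheap
 * byte comparison against the last RPA each IRK resolved to, and only
 * the second loop pays for an AES operation per IRK through
 * smp_irk_matches(). Caching the resolved RPA in irk->rpa keeps
 * repeated advertising reports from the same device on the fast path.
 */
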
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

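/* Sketch of a typical caller (hypothetical code, for illustration):
 * a Link Key Notification event handler would store the key and let
 * the *persistent result decide what to tell user space:
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, conn->pin_length,
 *			       &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */
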
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

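/* Note on the timeout handler: setting cmd_cnt back to 1 is what
 * unblocks command processing again. The controller never answered
 * the outstanding command, so the core stops waiting for it and lets
 * the next queued command go out on the rescheduled cmd_work.
 */
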
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return 0;
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

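/* Minimal usage sketch (hypothetical caller and address): reject
 * future connections from a given BR/EDR address and later allow it
 * again:
 *
 *	hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 *
 * Passing BDADDR_ANY to hci_blacklist_del() clears the whole list.
 */
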
struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_conn_lookup(struct hci_dev *hdev,
						bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, &hdev->pend_le_conns, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, struct hci_conn_params *params)
{
	list_del_init(&params->action);
	list_add(&params->action, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", &params->addr, params->addr_type);

	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, struct hci_conn_params *params)
{
	list_del_init(&params->action);

	BT_DBG("addr %pMR (type %u)", &params->addr, params->addr_type);

	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	while (!list_empty(&hdev->pend_le_conns))
		list_del_init(hdev->pend_le_conns.next);

	BT_DBG("All LE pending connections cleared");
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	if (params->auto_connect == HCI_AUTO_CONN_REPORT &&
	    auto_connect != HCI_AUTO_CONN_REPORT)
		list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, params);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->auto_connect != HCI_AUTO_CONN_REPORT) {
			list_del_init(&params->action);
			list_add(&params->action,
				 &hdev->pend_le_reports);
		}
		hci_pend_le_conn_del(hdev, params);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, params);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

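/* Informal reading of the two action lists used above: pend_le_conns
 * holds devices that background scanning should actively connect to,
 * while pend_le_reports holds devices that should only generate
 * device-found reports when seen. A caller sketch with illustrative
 * values:
 *
 *	hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS);
 */
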
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	if (params->auto_connect == HCI_AUTO_CONN_REPORT)
		list_del_init(&params->action);

	hci_pend_le_conn_del(hdev, params);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_enabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
	}

	hci_update_background_scan(hdev);

	BT_DBG("All enabled LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
	}

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

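/* Summary of the priority order implemented above: (1) an RPA when
 * HCI_PRIVACY is set, regenerated only on expiry or mismatch, (2) a
 * freshly generated non-resolvable private address when privacy is
 * required but no RPA may be used (note the urpa.b[5] &= 0x3f, which
 * clears the two top bits that would otherwise mark the address as
 * resolvable or static random), (3) the static random address, and
 * finally (4) the public address.
 */
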
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

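/* Usage sketch (hypothetical caller): advertising or connection code
 * can fill in an own-address field without repeating the selection
 * logic above:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */
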
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

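/* Registration sketch for a transport driver (the callback names are
 * hypothetical; only the calls into the core are taken from this
 * file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
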
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

Marcel Holtmann76bca882009-11-18 00:40:39 +01004099/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004100int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004101{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004102 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004103 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004104 kfree_skb(skb);
4105 return -ENXIO;
4106 }
4107
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004108 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004109 bt_cb(skb)->incoming = 1;
4110
4111 /* Time stamp */
4112 __net_timestamp(skb);
4113
Marcel Holtmann76bca882009-11-18 00:40:39 +01004114 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004115 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004116
Marcel Holtmann76bca882009-11-18 00:40:39 +01004117 return 0;
4118}
4119EXPORT_SYMBOL(hci_recv_frame);
4120
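/* Illustrative sketch: how a transport driver might hand a fully framed
 * packet to the core. The helper name below is hypothetical; only
 * bt_skb_alloc(), bt_cb() and hci_recv_frame() are APIs used elsewhere
 * in this file. Note that hci_recv_frame() consumes the skb on both
 * success and failure paths.
 */
static int example_driver_deliver(struct hci_dev *hdev, const void *buf,
				  int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Tag the packet type before queueing it for the RX work */
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);
}
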
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304121static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004122 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304123{
4124 int len = 0;
4125 int hlen = 0;
4126 int remain = count;
4127 struct sk_buff *skb;
4128 struct bt_skb_cb *scb;
4129
4130 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004131 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304132 return -EILSEQ;
4133
4134 skb = hdev->reassembly[index];
4135
4136 if (!skb) {
4137 switch (type) {
4138 case HCI_ACLDATA_PKT:
4139 len = HCI_MAX_FRAME_SIZE;
4140 hlen = HCI_ACL_HDR_SIZE;
4141 break;
4142 case HCI_EVENT_PKT:
4143 len = HCI_MAX_EVENT_SIZE;
4144 hlen = HCI_EVENT_HDR_SIZE;
4145 break;
4146 case HCI_SCODATA_PKT:
4147 len = HCI_MAX_SCO_SIZE;
4148 hlen = HCI_SCO_HDR_SIZE;
4149 break;
4150 }
4151
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004152 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304153 if (!skb)
4154 return -ENOMEM;
4155
4156 scb = (void *) skb->cb;
4157 scb->expect = hlen;
4158 scb->pkt_type = type;
4159
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304160 hdev->reassembly[index] = skb;
4161 }
4162
4163 while (count) {
4164 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004165 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304166
4167 memcpy(skb_put(skb, len), data, len);
4168
4169 count -= len;
4170 data += len;
4171 scb->expect -= len;
4172 remain = count;
4173
4174 switch (type) {
4175 case HCI_EVENT_PKT:
4176 if (skb->len == HCI_EVENT_HDR_SIZE) {
4177 struct hci_event_hdr *h = hci_event_hdr(skb);
4178 scb->expect = h->plen;
4179
4180 if (skb_tailroom(skb) < scb->expect) {
4181 kfree_skb(skb);
4182 hdev->reassembly[index] = NULL;
4183 return -ENOMEM;
4184 }
4185 }
4186 break;
4187
4188 case HCI_ACLDATA_PKT:
4189 if (skb->len == HCI_ACL_HDR_SIZE) {
4190 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4191 scb->expect = __le16_to_cpu(h->dlen);
4192
4193 if (skb_tailroom(skb) < scb->expect) {
4194 kfree_skb(skb);
4195 hdev->reassembly[index] = NULL;
4196 return -ENOMEM;
4197 }
4198 }
4199 break;
4200
4201 case HCI_SCODATA_PKT:
4202 if (skb->len == HCI_SCO_HDR_SIZE) {
4203 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4204 scb->expect = h->dlen;
4205
4206 if (skb_tailroom(skb) < scb->expect) {
4207 kfree_skb(skb);
4208 hdev->reassembly[index] = NULL;
4209 return -ENOMEM;
4210 }
4211 }
4212 break;
4213 }
4214
4215 if (scb->expect == 0) {
4216 /* Complete frame */
4217
4218 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004219 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304220
4221 hdev->reassembly[index] = NULL;
4222 return remain;
4223 }
4224 }
4225
4226 return remain;
4227}
4228
Marcel Holtmannef222012007-07-11 06:42:04 +02004229int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4230{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304231 int rem = 0;
4232
Marcel Holtmannef222012007-07-11 06:42:04 +02004233 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4234 return -EILSEQ;
4235
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004236 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004237 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304238 if (rem < 0)
4239 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004240
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304241 data += (count - rem);
4242 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004243 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004244
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304245 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004246}
4247EXPORT_SYMBOL(hci_recv_fragment);
4248
Suraj Sumangala99811512010-07-14 13:02:19 +05304249#define STREAM_REASSEMBLY 0
4250
4251int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4252{
4253 int type;
4254 int rem = 0;
4255
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004256 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304257 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4258
4259 if (!skb) {
4260 struct { char type; } *pkt;
4261
4262 /* Start of the frame */
4263 pkt = data;
4264 type = pkt->type;
4265
4266 data++;
4267 count--;
4268 } else
4269 type = bt_cb(skb)->pkt_type;
4270
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004271 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004272 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304273 if (rem < 0)
4274 return rem;
4275
4276 data += (count - rem);
4277 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004278 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304279
4280 return rem;
4281}
4282EXPORT_SYMBOL(hci_recv_stream_fragment);
4283
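/* Illustrative sketch: a UART-style driver forwarding an untyped byte
 * stream. The core parses the leading packet-type octet and reassembles
 * via hdev->reassembly, so the driver only passes along whatever
 * arrived; the callback name is hypothetical.
 */
static void example_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	int err = hci_recv_stream_fragment(hdev, data, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed: %d", hdev->name, err);
}
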
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284/* ---- Interface to upper protocols ---- */
4285
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286int hci_register_cb(struct hci_cb *cb)
4287{
4288 BT_DBG("%p name %s", cb, cb->name);
4289
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004290 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004292 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
4294 return 0;
4295}
4296EXPORT_SYMBOL(hci_register_cb);
4297
4298int hci_unregister_cb(struct hci_cb *cb)
4299{
4300 BT_DBG("%p name %s", cb, cb->name);
4301
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004302 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004304 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305
4306 return 0;
4307}
4308EXPORT_SYMBOL(hci_unregister_cb);
4309
Marcel Holtmann51086992013-10-10 14:54:19 -07004310static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004312 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004314 /* Time stamp */
4315 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004317 /* Send copy to monitor */
4318 hci_send_to_monitor(hdev, skb);
4319
4320 if (atomic_read(&hdev->promisc)) {
4321 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004322 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 }
4324
4325	/* Get rid of skb owner prior to sending to the driver. */
4326 skb_orphan(skb);
4327
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004328 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004329 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330}
4331
Johan Hedberg3119ae92013-03-05 20:37:44 +02004332void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4333{
4334 skb_queue_head_init(&req->cmd_q);
4335 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004336 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004337}
4338
4339int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4340{
4341 struct hci_dev *hdev = req->hdev;
4342 struct sk_buff *skb;
4343 unsigned long flags;
4344
4345 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4346
Andre Guedes5d73e032013-03-08 11:20:16 -03004347	/* If an error occurred during request building, remove all HCI
4348 * commands queued on the HCI request queue.
4349 */
4350 if (req->err) {
4351 skb_queue_purge(&req->cmd_q);
4352 return req->err;
4353 }
4354
Johan Hedberg3119ae92013-03-05 20:37:44 +02004355 /* Do not allow empty requests */
4356 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004357 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004358
4359 skb = skb_peek_tail(&req->cmd_q);
4360 bt_cb(skb)->req.complete = complete;
4361
4362 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4363 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4364 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4365
4366 queue_work(hdev->workqueue, &hdev->cmd_work);
4367
4368 return 0;
4369}
4370
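/* Illustrative sketch: building and running an asynchronous request.
 * hci_req_add() is defined further down; the completion callback is
 * attached to the last queued command and therefore fires exactly once
 * for the whole request. The example function names are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status 0x%2.2x", hdev->name,
	       status);
}

static int example_send_request(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	/* ... further hci_req_add() calls may be chained here ... */

	return hci_req_run(&req, example_req_complete);
}
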
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004371static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004372 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373{
4374 int len = HCI_COMMAND_HDR_SIZE + plen;
4375 struct hci_command_hdr *hdr;
4376 struct sk_buff *skb;
4377
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004379 if (!skb)
4380 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381
4382 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004383 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 hdr->plen = plen;
4385
4386 if (plen)
4387 memcpy(skb_put(skb, plen), param, plen);
4388
4389 BT_DBG("skb len %d", skb->len);
4390
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004391 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004392
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004393 return skb;
4394}
4395
4396/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004397int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4398 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004399{
4400 struct sk_buff *skb;
4401
4402 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4403
4404 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4405 if (!skb) {
4406 BT_ERR("%s no memory for command", hdev->name);
4407 return -ENOMEM;
4408 }
4409
Johan Hedberg11714b32013-03-05 20:37:47 +02004410	/* Stand-alone HCI commands must be flagged as
4411 * single-command requests.
4412 */
4413 bt_cb(skb)->req.start = true;
4414
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004416 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417
4418 return 0;
4419}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420
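/* Illustrative sketch: issuing a stand-alone command outside of a
 * request context. The skb is flagged as a single-command request
 * above, so the matching command complete event is handled without any
 * request bookkeeping. The wrapper name is hypothetical.
 */
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
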
Johan Hedberg71c76a12013-03-05 20:37:46 +02004421/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004422void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4423 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004424{
4425 struct hci_dev *hdev = req->hdev;
4426 struct sk_buff *skb;
4427
4428 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4429
Andre Guedes34739c12013-03-08 11:20:18 -03004430	/* If an error occurred during request building, there is no point in
4431 * queueing the HCI command. We can simply return.
4432 */
4433 if (req->err)
4434 return;
4435
Johan Hedberg71c76a12013-03-05 20:37:46 +02004436 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4437 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004438 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4439 hdev->name, opcode);
4440 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004441 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004442 }
4443
4444 if (skb_queue_empty(&req->cmd_q))
4445 bt_cb(skb)->req.start = true;
4446
Johan Hedberg02350a72013-04-03 21:50:29 +03004447 bt_cb(skb)->req.event = event;
4448
Johan Hedberg71c76a12013-03-05 20:37:46 +02004449 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004450}
4451
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004452void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4453 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004454{
4455 hci_req_add_ev(req, opcode, plen, param, 0);
4456}
4457
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004459void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460{
4461 struct hci_command_hdr *hdr;
4462
4463 if (!hdev->sent_cmd)
4464 return NULL;
4465
4466 hdr = (void *) hdev->sent_cmd->data;
4467
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004468 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 return NULL;
4470
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004471 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472
4473 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4474}
4475
4476/* Send ACL data */
4477static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4478{
4479 struct hci_acl_hdr *hdr;
4480 int len = skb->len;
4481
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004482 skb_push(skb, HCI_ACL_HDR_SIZE);
4483 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004484 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004485 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4486 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487}
4488
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004489static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004490 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004492 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493 struct hci_dev *hdev = conn->hdev;
4494 struct sk_buff *list;
4495
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004496 skb->len = skb_headlen(skb);
4497 skb->data_len = 0;
4498
4499 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004500
4501 switch (hdev->dev_type) {
4502 case HCI_BREDR:
4503 hci_add_acl_hdr(skb, conn->handle, flags);
4504 break;
4505 case HCI_AMP:
4506 hci_add_acl_hdr(skb, chan->handle, flags);
4507 break;
4508 default:
4509 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4510 return;
4511 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004512
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004513 list = skb_shinfo(skb)->frag_list;
4514 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515		/* Non-fragmented */
4516 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4517
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004518 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519 } else {
4520 /* Fragmented */
4521 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4522
4523 skb_shinfo(skb)->frag_list = NULL;
4524
4525 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004526 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004528 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004529
4530 flags &= ~ACL_START;
4531 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004532 do {
4533 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004534
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004535 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004536 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537
4538 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4539
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004540 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541 } while (list);
4542
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004543 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004545}
4546
4547void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4548{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004549 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004550
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004551 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004552
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004553 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004555 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557
4558/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004559void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560{
4561 struct hci_dev *hdev = conn->hdev;
4562 struct hci_sco_hdr hdr;
4563
4564 BT_DBG("%s len %d", hdev->name, skb->len);
4565
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004566 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567 hdr.dlen = skb->len;
4568
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004569 skb_push(skb, HCI_SCO_HDR_SIZE);
4570 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004571 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004573 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004574
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004576 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
4579/* ---- HCI TX task (outgoing data) ---- */
4580
4581/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004582static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4583 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584{
4585 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004586 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004587 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004589	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004590	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004591
4592 rcu_read_lock();
4593
4594 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004595 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004597
4598 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4599 continue;
4600
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 num++;
4602
4603 if (c->sent < min) {
4604 min = c->sent;
4605 conn = c;
4606 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004607
4608 if (hci_conn_num(hdev, type) == num)
4609 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 }
4611
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004612 rcu_read_unlock();
4613
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004615 int cnt, q;
4616
4617 switch (conn->type) {
4618 case ACL_LINK:
4619 cnt = hdev->acl_cnt;
4620 break;
4621 case SCO_LINK:
4622 case ESCO_LINK:
4623 cnt = hdev->sco_cnt;
4624 break;
4625 case LE_LINK:
4626 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4627 break;
4628 default:
4629 cnt = 0;
4630 BT_ERR("Unknown link type");
4631 }
4632
4633 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634 *quote = q ? q : 1;
4635 } else
4636 *quote = 0;
4637
4638 BT_DBG("conn %p quote %d", conn, *quote);
4639 return conn;
4640}
4641
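/* Worked example for the quota computed above: with 8 free ACL buffers
 * (cnt == 8) shared by 3 connections that have queued data (num == 3),
 * the least-busy connection gets quote = 8 / 3 = 2 packets in this
 * scheduling round; a zero quotient is rounded up to 1 so a busy link
 * can never be starved completely.
 */
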
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004642static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643{
4644 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004645 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646
Ville Tervobae1f5d92011-02-10 22:38:53 -03004647 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004649 rcu_read_lock();
4650
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004652 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004653 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004654 BT_ERR("%s killing stalled connection %pMR",
4655 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004656 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657 }
4658 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004659
4660 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661}
4662
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004663static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4664 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004665{
4666 struct hci_conn_hash *h = &hdev->conn_hash;
4667 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004668 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004669 struct hci_conn *conn;
4670 int cnt, q, conn_num = 0;
4671
4672 BT_DBG("%s", hdev->name);
4673
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004674 rcu_read_lock();
4675
4676 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004677 struct hci_chan *tmp;
4678
4679 if (conn->type != type)
4680 continue;
4681
4682 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4683 continue;
4684
4685 conn_num++;
4686
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004687 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004688 struct sk_buff *skb;
4689
4690 if (skb_queue_empty(&tmp->data_q))
4691 continue;
4692
4693 skb = skb_peek(&tmp->data_q);
4694 if (skb->priority < cur_prio)
4695 continue;
4696
4697 if (skb->priority > cur_prio) {
4698 num = 0;
4699 min = ~0;
4700 cur_prio = skb->priority;
4701 }
4702
4703 num++;
4704
4705 if (conn->sent < min) {
4706 min = conn->sent;
4707 chan = tmp;
4708 }
4709 }
4710
4711 if (hci_conn_num(hdev, type) == conn_num)
4712 break;
4713 }
4714
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004715 rcu_read_unlock();
4716
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004717 if (!chan)
4718 return NULL;
4719
4720 switch (chan->conn->type) {
4721 case ACL_LINK:
4722 cnt = hdev->acl_cnt;
4723 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004724 case AMP_LINK:
4725 cnt = hdev->block_cnt;
4726 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004727 case SCO_LINK:
4728 case ESCO_LINK:
4729 cnt = hdev->sco_cnt;
4730 break;
4731 case LE_LINK:
4732 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4733 break;
4734 default:
4735 cnt = 0;
4736 BT_ERR("Unknown link type");
4737 }
4738
4739 q = cnt / num;
4740 *quote = q ? q : 1;
4741 BT_DBG("chan %p quote %d", chan, *quote);
4742 return chan;
4743}
4744
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004745static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4746{
4747 struct hci_conn_hash *h = &hdev->conn_hash;
4748 struct hci_conn *conn;
4749 int num = 0;
4750
4751 BT_DBG("%s", hdev->name);
4752
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004753 rcu_read_lock();
4754
4755 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004756 struct hci_chan *chan;
4757
4758 if (conn->type != type)
4759 continue;
4760
4761 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4762 continue;
4763
4764 num++;
4765
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004766 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004767 struct sk_buff *skb;
4768
4769 if (chan->sent) {
4770 chan->sent = 0;
4771 continue;
4772 }
4773
4774 if (skb_queue_empty(&chan->data_q))
4775 continue;
4776
4777 skb = skb_peek(&chan->data_q);
4778 if (skb->priority >= HCI_PRIO_MAX - 1)
4779 continue;
4780
4781 skb->priority = HCI_PRIO_MAX - 1;
4782
4783 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004784 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004785 }
4786
4787 if (hci_conn_num(hdev, type) == num)
4788 break;
4789 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004790
4791 rcu_read_unlock();
4792
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004793}
4794
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004795static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4796{
4797 /* Calculate count of blocks used by this packet */
4798 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4799}
4800
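/* Worked example: with a controller block_len of 64 and
 * HCI_ACL_HDR_SIZE == 4, a 339-byte ACL skb carries 335 payload bytes
 * and therefore occupies DIV_ROUND_UP(335, 64) == 6 controller blocks.
 */
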
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004801static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004802{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004803 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804		/* ACL tx timeout must be longer than the maximum
4805 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004806 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004807 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004808 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004810}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004812static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004813{
4814 unsigned int cnt = hdev->acl_cnt;
4815 struct hci_chan *chan;
4816 struct sk_buff *skb;
4817 int quote;
4818
4819 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004820
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004821 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004822 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004823 u32 priority = (skb_peek(&chan->data_q))->priority;
4824 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004825 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004826 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004827
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004828 /* Stop if priority has changed */
4829 if (skb->priority < priority)
4830 break;
4831
4832 skb = skb_dequeue(&chan->data_q);
4833
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004834 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004835 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004836
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004837 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004838 hdev->acl_last_tx = jiffies;
4839
4840 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004841 chan->sent++;
4842 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004843 }
4844 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004845
4846 if (cnt != hdev->acl_cnt)
4847 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848}
4849
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004850static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004851{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004852 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004853 struct hci_chan *chan;
4854 struct sk_buff *skb;
4855 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004856 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004857
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004858 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004859
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004860 BT_DBG("%s", hdev->name);
4861
4862 if (hdev->dev_type == HCI_AMP)
4863 type = AMP_LINK;
4864 else
4865 type = ACL_LINK;
4866
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004867 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004868 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004869 u32 priority = (skb_peek(&chan->data_q))->priority;
4870 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4871 int blocks;
4872
4873 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004874 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004875
4876 /* Stop if priority has changed */
4877 if (skb->priority < priority)
4878 break;
4879
4880 skb = skb_dequeue(&chan->data_q);
4881
4882 blocks = __get_blocks(hdev, skb);
4883 if (blocks > hdev->block_cnt)
4884 return;
4885
4886 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004887 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004888
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004889 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004890 hdev->acl_last_tx = jiffies;
4891
4892 hdev->block_cnt -= blocks;
4893 quote -= blocks;
4894
4895 chan->sent += blocks;
4896 chan->conn->sent += blocks;
4897 }
4898 }
4899
4900 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004901 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004902}
4903
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004904static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004905{
4906 BT_DBG("%s", hdev->name);
4907
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004908 /* No ACL link over BR/EDR controller */
4909 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4910 return;
4911
4912 /* No AMP link over AMP controller */
4913 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004914 return;
4915
4916 switch (hdev->flow_ctl_mode) {
4917 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4918 hci_sched_acl_pkt(hdev);
4919 break;
4920
4921 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4922 hci_sched_acl_blk(hdev);
4923 break;
4924 }
4925}
4926
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004928static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929{
4930 struct hci_conn *conn;
4931 struct sk_buff *skb;
4932 int quote;
4933
4934 BT_DBG("%s", hdev->name);
4935
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004936 if (!hci_conn_num(hdev, SCO_LINK))
4937 return;
4938
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4940 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4941 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004942 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943
4944 conn->sent++;
4945 if (conn->sent == ~0)
4946 conn->sent = 0;
4947 }
4948 }
4949}
4950
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004951static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004952{
4953 struct hci_conn *conn;
4954 struct sk_buff *skb;
4955 int quote;
4956
4957 BT_DBG("%s", hdev->name);
4958
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004959 if (!hci_conn_num(hdev, ESCO_LINK))
4960 return;
4961
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004962 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4963 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004964 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4965 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004966 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004967
4968 conn->sent++;
4969 if (conn->sent == ~0)
4970 conn->sent = 0;
4971 }
4972 }
4973}
4974
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004975static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004976{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004977 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004978 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004979 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004980
4981 BT_DBG("%s", hdev->name);
4982
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004983 if (!hci_conn_num(hdev, LE_LINK))
4984 return;
4985
Marcel Holtmann4a964402014-07-02 19:10:33 +02004986 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004987		/* LE tx timeout must be longer than the maximum
4988 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004989 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004990 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004991 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004992 }
4993
4994 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004995 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004996 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004997 u32 priority = (skb_peek(&chan->data_q))->priority;
4998 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004999 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005000 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005001
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005002 /* Stop if priority has changed */
5003 if (skb->priority < priority)
5004 break;
5005
5006 skb = skb_dequeue(&chan->data_q);
5007
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005008 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005009 hdev->le_last_tx = jiffies;
5010
5011 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005012 chan->sent++;
5013 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005014 }
5015 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005016
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005017 if (hdev->le_pkts)
5018 hdev->le_cnt = cnt;
5019 else
5020 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005021
5022 if (cnt != tmp)
5023 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005024}
5025
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005026static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005028 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029 struct sk_buff *skb;
5030
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005031 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005032 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033
Marcel Holtmann52de5992013-09-03 18:08:38 -07005034 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5035 /* Schedule queues and send stuff to HCI driver */
5036 hci_sched_acl(hdev);
5037 hci_sched_sco(hdev);
5038 hci_sched_esco(hdev);
5039 hci_sched_le(hdev);
5040 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005041
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042 /* Send next queued raw (unknown type) packet */
5043 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005044 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045}
5046
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005047/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048
5049/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005050static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051{
5052 struct hci_acl_hdr *hdr = (void *) skb->data;
5053 struct hci_conn *conn;
5054 __u16 handle, flags;
5055
5056 skb_pull(skb, HCI_ACL_HDR_SIZE);
5057
5058 handle = __le16_to_cpu(hdr->handle);
5059 flags = hci_flags(handle);
5060 handle = hci_handle(handle);
5061
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005062 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005063 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064
5065 hdev->stat.acl_rx++;
5066
5067 hci_dev_lock(hdev);
5068 conn = hci_conn_hash_lookup_handle(hdev, handle);
5069 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005070
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005072 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005073
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005075 l2cap_recv_acldata(conn, skb, flags);
5076 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005077 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005078 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005079 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 }
5081
5082 kfree_skb(skb);
5083}
5084
5085/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005086static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087{
5088 struct hci_sco_hdr *hdr = (void *) skb->data;
5089 struct hci_conn *conn;
5090 __u16 handle;
5091
5092 skb_pull(skb, HCI_SCO_HDR_SIZE);
5093
5094 handle = __le16_to_cpu(hdr->handle);
5095
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005096 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097
5098 hdev->stat.sco_rx++;
5099
5100 hci_dev_lock(hdev);
5101 conn = hci_conn_hash_lookup_handle(hdev, handle);
5102 hci_dev_unlock(hdev);
5103
5104 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005106 sco_recv_scodata(conn, skb);
5107 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005109 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005110 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111 }
5112
5113 kfree_skb(skb);
5114}
5115
Johan Hedberg9238f362013-03-05 20:37:48 +02005116static bool hci_req_is_complete(struct hci_dev *hdev)
5117{
5118 struct sk_buff *skb;
5119
5120 skb = skb_peek(&hdev->cmd_q);
5121 if (!skb)
5122 return true;
5123
5124 return bt_cb(skb)->req.start;
5125}
5126
Johan Hedberg42c6b122013-03-05 20:37:49 +02005127static void hci_resend_last(struct hci_dev *hdev)
5128{
5129 struct hci_command_hdr *sent;
5130 struct sk_buff *skb;
5131 u16 opcode;
5132
5133 if (!hdev->sent_cmd)
5134 return;
5135
5136 sent = (void *) hdev->sent_cmd->data;
5137 opcode = __le16_to_cpu(sent->opcode);
5138 if (opcode == HCI_OP_RESET)
5139 return;
5140
5141 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5142 if (!skb)
5143 return;
5144
5145 skb_queue_head(&hdev->cmd_q, skb);
5146 queue_work(hdev->workqueue, &hdev->cmd_work);
5147}
5148
Johan Hedberg9238f362013-03-05 20:37:48 +02005149void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5150{
5151 hci_req_complete_t req_complete = NULL;
5152 struct sk_buff *skb;
5153 unsigned long flags;
5154
5155 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5156
Johan Hedberg42c6b122013-03-05 20:37:49 +02005157 /* If the completed command doesn't match the last one that was
5158	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005159 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005160 if (!hci_sent_cmd_data(hdev, opcode)) {
5161 /* Some CSR based controllers generate a spontaneous
5162 * reset complete event during init and any pending
5163 * command will never be completed. In such a case we
5164 * need to resend whatever was the last sent
5165 * command.
5166 */
5167 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5168 hci_resend_last(hdev);
5169
Johan Hedberg9238f362013-03-05 20:37:48 +02005170 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005171 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005172
5173 /* If the command succeeded and there's still more commands in
5174 * this request the request is not yet complete.
5175 */
5176 if (!status && !hci_req_is_complete(hdev))
5177 return;
5178
5179 /* If this was the last command in a request the complete
5180 * callback would be found in hdev->sent_cmd instead of the
5181 * command queue (hdev->cmd_q).
5182 */
5183 if (hdev->sent_cmd) {
5184 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005185
5186 if (req_complete) {
5187 /* We must set the complete callback to NULL to
5188 * avoid calling the callback more than once if
5189 * this function gets called again.
5190 */
5191 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5192
Johan Hedberg9238f362013-03-05 20:37:48 +02005193 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005194 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005195 }
5196
5197 /* Remove all pending commands belonging to this request */
5198 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5199 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5200 if (bt_cb(skb)->req.start) {
5201 __skb_queue_head(&hdev->cmd_q, skb);
5202 break;
5203 }
5204
5205 req_complete = bt_cb(skb)->req.complete;
5206 kfree_skb(skb);
5207 }
5208 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5209
5210call_complete:
5211 if (req_complete)
5212 req_complete(hdev, status);
5213}
5214
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005215static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005217 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218 struct sk_buff *skb;
5219
5220 BT_DBG("%s", hdev->name);
5221
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005223 /* Send copy to monitor */
5224 hci_send_to_monitor(hdev, skb);
5225
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 if (atomic_read(&hdev->promisc)) {
5227 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005228 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 }
5230
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 kfree_skb(skb);
5233 continue;
5234 }
5235
5236 if (test_bit(HCI_INIT, &hdev->flags)) {
5237 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005238 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 case HCI_ACLDATA_PKT:
5240 case HCI_SCODATA_PKT:
5241 kfree_skb(skb);
5242 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005243 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 }
5245
5246 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005247 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005249 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250 hci_event_packet(hdev, skb);
5251 break;
5252
5253 case HCI_ACLDATA_PKT:
5254 BT_DBG("%s ACL data packet", hdev->name);
5255 hci_acldata_packet(hdev, skb);
5256 break;
5257
5258 case HCI_SCODATA_PKT:
5259 BT_DBG("%s SCO data packet", hdev->name);
5260 hci_scodata_packet(hdev, skb);
5261 break;
5262
5263 default:
5264 kfree_skb(skb);
5265 break;
5266 }
5267 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268}
5269
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005270static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005272 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 struct sk_buff *skb;
5274
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005275 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5276 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005279 if (atomic_read(&hdev->cmd_cnt)) {
5280 skb = skb_dequeue(&hdev->cmd_q);
5281 if (!skb)
5282 return;
5283
Wei Yongjun7585b972009-02-25 18:29:52 +08005284 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005286 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005287 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005289 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005290 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005291 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005292 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005293 schedule_delayed_work(&hdev->cmd_timer,
5294 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295 } else {
5296 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005297 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298 }
5299 }
5300}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005301
5302void hci_req_add_le_scan_disable(struct hci_request *req)
5303{
5304 struct hci_cp_le_set_scan_enable cp;
5305
5306 memset(&cp, 0, sizeof(cp));
5307 cp.enable = LE_SCAN_DISABLE;
5308 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5309}
Andre Guedesa4790db2014-02-26 20:21:47 -03005310
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005311void hci_req_add_le_passive_scan(struct hci_request *req)
5312{
5313 struct hci_cp_le_set_scan_param param_cp;
5314 struct hci_cp_le_set_scan_enable enable_cp;
5315 struct hci_dev *hdev = req->hdev;
5316 u8 own_addr_type;
5317
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005318	/* Set require_privacy to false since no SCAN_REQ are sent
5319 * during passive scanning. Not using an unresolvable address
5320 * here is important so that peer devices using direct
5321 * advertising with our address will be correctly reported
5322 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005323 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005324 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005325 return;
5326
5327 memset(&param_cp, 0, sizeof(param_cp));
5328 param_cp.type = LE_SCAN_PASSIVE;
5329 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5330 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5331 param_cp.own_address_type = own_addr_type;
5332 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5333 &param_cp);
5334
5335 memset(&enable_cp, 0, sizeof(enable_cp));
5336 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005337 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005338 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5339 &enable_cp);
5340}
5341
Andre Guedesa4790db2014-02-26 20:21:47 -03005342static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5343{
5344 if (status)
5345 BT_DBG("HCI request failed to update background scanning: "
5346 "status 0x%2.2x", status);
5347}
5348
5349/* This function controls the background scanning based on hdev->pend_le_conns
5350 * list. If there are pending LE connections we start the background scanning,
5351 * otherwise we stop it.
5352 *
5353 * This function requires the caller holds hdev->lock.
5354 */
5355void hci_update_background_scan(struct hci_dev *hdev)
5356{
Andre Guedesa4790db2014-02-26 20:21:47 -03005357 struct hci_request req;
5358 struct hci_conn *conn;
5359 int err;
5360
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005361 if (!test_bit(HCI_UP, &hdev->flags) ||
5362 test_bit(HCI_INIT, &hdev->flags) ||
5363 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005364 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005365 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005366 return;
5367
Andre Guedesa4790db2014-02-26 20:21:47 -03005368 hci_req_init(&req, hdev);
5369
Johan Hedberg66f84552014-07-04 12:37:18 +03005370 if (list_empty(&hdev->pend_le_conns) &&
5371 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005372		/* If there are no pending LE connections or devices
5373 * to be scanned for, we should stop the background
5374 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005375 */
5376
5377 /* If controller is not scanning we are done. */
5378 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5379 return;
5380
5381 hci_req_add_le_scan_disable(&req);
5382
5383 BT_DBG("%s stopping background scanning", hdev->name);
5384 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005385 /* If there is at least one pending LE connection, we should
5386 * keep the background scan running.
5387 */
5388
Andre Guedesa4790db2014-02-26 20:21:47 -03005389 /* If controller is connecting, we should not start scanning
5390 * since some controllers are not able to scan and connect at
5391 * the same time.
5392 */
5393 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5394 if (conn)
5395 return;
5396
Andre Guedes4340a122014-03-10 18:26:24 -03005397 /* If controller is currently scanning, we stop it to ensure we
5398 * don't miss any advertising (due to duplicates filter).
5399 */
5400 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5401 hci_req_add_le_scan_disable(&req);
5402
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005403 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005404
5405 BT_DBG("%s starting background scanning", hdev->name);
5406 }
5407
5408 err = hci_req_run(&req, update_background_scan_complete);
5409 if (err)
5410 BT_ERR("Failed to run HCI request: err %d", err);
5411}
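
/* Illustrative sketch: as the comment above notes, callers must hold
 * hdev->lock, e.g.:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */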