/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

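/* The dut_mode attribute exposes Device Under Test mode. Reading it
 * reports 'Y' or 'N'; writing a boolean toggles the mode by sending
 * HCI_OP_ENABLE_DUT_MODE (on) or HCI_OP_RESET (off) to the controller.
 * With debugfs mounted in the usual place, the file typically lives at
 * /sys/kernel/debug/bluetooth/hci0/dut_mode.
 */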
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

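/* features dumps every supported LMP feature page (and the LE feature
 * page when the controller is LE capable) as raw octets.
 */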
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

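/* The key lists below (link keys, IRKs, LTKs) are RCU-protected, so
 * their dump helpers walk them under rcu_read_lock() instead of taking
 * the hdev lock.
 */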
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

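/* force_sc_support and force_lesc_support are debug-only switches: the
 * SC toggle may only be written while the device is down (-EBUSY
 * otherwise), while the LESC toggle can be flipped at any time. Both
 * just change a bit in hdev->dbg_flags and send no HCI commands.
 */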
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_lesc_support_fops = {
	.open		= simple_open,
	.read		= force_lesc_support_read,
	.write		= force_lesc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

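/* idle_timeout is in milliseconds: 0 disables it, any other value must
 * fall between 500 ms and one hour (3600000 ms).
 */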
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

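/* identity prints the identity address (public or static random) along
 * with the local IRK and the current RPA on a single line.
 */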
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

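/* Each long-term key line carries: address, address type, authenticated
 * flag, key type, encryption key size, EDIV, Rand and the key value.
 */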
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

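/* LE connection intervals are in units of 1.25 ms, so the valid range
 * 0x0006-0x0c80 spans 7.5 ms to 4 s; the minimum may never exceed the
 * configured maximum and vice versa.
 */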
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

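/* The LE supervision timeout is in units of 10 ms, so the accepted
 * range 0x000a-0x0c80 corresponds to 100 ms through 32 s.
 */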
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

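/* Advertising intervals are in units of 0.625 ms; 0x0020-0x4000 covers
 * 20 ms through 10.24 s, with min capped by max and vice versa.
 */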
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

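/* Fetch the last received event from hdev->recv_evt and verify that it
 * matches the expected event code or, when no specific event is asked
 * for, that it is a Command Complete for the given opcode. Returns the
 * skb on a match and ERR_PTR(-ENODATA) otherwise.
 */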
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

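/* Send a single HCI command and sleep until the matching completion
 * event arrives or the timeout expires. Callers serialize through
 * hci_req_lock(), as dut_mode_write() above does.
 */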
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

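/* Synchronous requests drive hdev->req_status through a small state
 * machine: HCI_REQ_PEND while commands are in flight, then
 * HCI_REQ_DONE, HCI_REQ_CANCELED or a timeout, with req_wait_q waking
 * the sleeping caller.
 */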
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

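/* AMP controllers use block-based flow control and therefore need a
 * slightly different set of initial reads than BR/EDR controllers.
 */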
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

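/* Pick the best inquiry mode the controller supports: 0x02 for inquiry
 * with extended results, 0x01 for inquiry with RSSI, 0x00 for standard
 * inquiry. The manufacturer/revision checks below special-case a few
 * controllers that handle RSSI mode without advertising it in their
 * feature bits.
 */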
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

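/* Build the event mask octet by octet from the controller's feature
 * bits so that only events the hardware can actually generate get
 * unmasked.
 */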
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However, some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

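/* Build the default link policy from the link modes the controller
 * supports and program it with Write Default Link Policy Settings.
 */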
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

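/* Mirror the HCI_LE_ENABLED flag into the controller's LE Host
 * support setting. Only dual-mode (BR/EDR capable) controllers need
 * this; LE-only controllers have LE support implicitly enabled.
 */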
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

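/* Third stage of controller init: stored link key handling, link
 * policy, page scan parameters and the LE event mask and feature
 * pages, each guarded by the supported commands bit mask.
 */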
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f; /* LE Connection Complete, LE Advertising
				   * Report, LE Connection Update Complete and
				   * LE Read Remote Used Features Complete
				   */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

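/* Fourth and final stage of controller init: page 2 event mask, codec
 * list, MWS transport configuration, synchronization train parameters
 * and Secure Connections support.
 */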
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

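/* Run the staged init sequence synchronously and, during the initial
 * setup phase, create the debugfs entries for the controller.
 */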
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
		if (lmp_le_capable(hdev))
			debugfs_create_file("force_lesc_support", 0644,
					    hdev->debugfs, hdev,
					    &force_lesc_support_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}

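/* Minimal init for unconfigured controllers: optional reset, local
 * version information and, if the driver can change it, the BD address.
 */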
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

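/* Synchronous wrapper around hci_init0_req, skipped entirely for
 * raw devices.
 */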
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

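/* Return true while a discovery procedure is actively finding or
 * resolving remote devices.
 */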
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

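/* Update the discovery state machine and notify mgmt and the
 * background scan logic about transitions.
 */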
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

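/* Free all cached inquiry entries and reset the unknown and resolve
 * lists.
 */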
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

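/* Reposition an entry in the resolve list, which is kept ordered by
 * absolute RSSI so that devices with stronger signals get their names
 * resolved first; entries already pending resolution stay in front.
 */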
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

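/* Add or refresh an inquiry cache entry and return the MGMT device
 * found flags (legacy pairing, confirm name) that apply to it.
 */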
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

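/* Copy up to num cached inquiry results into buf and return how many
 * entries were written.
 */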
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

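/* Handle the HCIINQUIRY ioctl: run an inquiry if the cache is stale or
 * a flush was requested, then copy the cached results to user space.
 */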
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

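/* Power on a controller: run the vendor setup and the staged init
 * sequence, and clean everything up again if any stage fails.
 */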
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

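/* Power off a controller: flush all pending work and queues, tear
 * down connections and, unless quirked, reset the hardware.
 */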
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

2783int hci_dev_reset_stat(__u16 dev)
2784{
2785 struct hci_dev *hdev;
2786 int ret = 0;
2787
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002788 hdev = hci_dev_get(dev);
2789 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 return -ENODEV;
2791
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002792 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2793 ret = -EBUSY;
2794 goto done;
2795 }
2796
Marcel Holtmann4a964402014-07-02 19:10:33 +02002797 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002798 ret = -EOPNOTSUPP;
2799 goto done;
2800 }
2801
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2803
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002804done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 return ret;
2807}
2808
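/* Keep the mgmt state in sync after a raw HCISETSCAN change: SCAN_PAGE
 * maps to HCI_CONNECTABLE and SCAN_INQUIRY to HCI_DISCOVERABLE. When
 * either flag changed and mgmt is in use, the new settings are
 * broadcast to userspace.
 */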
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

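/* Dispatch the legacy HCI device ioctls (HCISETAUTH, HCISETSCAN, ...).
 * These only apply to BR/EDR capable controllers that are fully
 * configured and not bound to a user channel.
 */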
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

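	/* The MTU ioctls below pack two 16-bit values into the 32-bit
	 * dev_opt field. Note that this pointer arithmetic assumes a
	 * little-endian host: the low half carries the packet count and
	 * the high half the MTU.
	 */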
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When auto-off is configured the transport is actually
		 * running, but the device should still be reported as
		 * down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When auto-off is configured the transport is actually
	 * running, but the device should still be reported as down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

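/* rfkill callback: a block request powers the controller down unless it
 * is bound to a user channel (-EBUSY) or still in its setup or config
 * stage. Unblocking only clears HCI_RFKILLED; powering the device back
 * up is left to userspace.
 */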
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

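/* Deferred power-on work: open the transport, then either keep the
 * device up, arm the auto-off timer, or turn it straight back off when
 * an error condition ignored during setup (rfkill, unconfigured
 * controller, missing address) still applies.
 */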
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Once the controller is configured, it is important
		 * to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

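/* The link key, LTK and IRK lists are RCU protected: lookups traverse
 * them under rcu_read_lock(), while updates use the _rcu list
 * primitives with kfree_rcu() and are expected to run with hdev->lock
 * held for mutual exclusion between writers.
 */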
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

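/* An SMP_LTK is the key used when the local device encrypts as the
 * initiator (master role); all other LTK types are used in the slave
 * role.
 */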
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

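/* Resolve a Resolvable Private Address to a stored IRK. The cached RPA
 * of each IRK is compared first; only on a miss is the more expensive
 * cryptographic smp_irk_matches() check run, and a match refreshes the
 * cached RPA.
 */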
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

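/* Store or update the BR/EDR link key for the given remote address.
 * Bogus "changed combination" keys from buggy controllers are
 * downgraded to plain combination keys, and *persistent tells the
 * caller whether the key is worth keeping in permanent storage.
 */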
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

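/* Remote OOB data received through an out-of-band channel is kept per
 * (bdaddr, bdaddr_type) pair and holds the P-192 and P-256 hash and
 * randomizer values used during Secure Simple Pairing.
 */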
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

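/* Add an address to one of the bdaddr_t based lists (blacklist,
 * whitelist or LE white list). BDADDR_ANY is rejected with -EBADF and
 * duplicate entries with -EEXIST.
 */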
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

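	/* Re-queue the params on the action list matching the new policy:
	 * pend_le_reports for HCI_AUTO_CONN_REPORT, pend_le_conns for
	 * direct/always connections that are not already established, and
	 * no list at all when automatic connection is disabled.
	 */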
	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

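/* Runs once the LE scan disable command has completed. For LE-only
 * discovery this ends the discovery session; for interleaved discovery
 * it flushes the inquiry cache and starts the BR/EDR inquiry phase.
 */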
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

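/* Select the own address type for a request (and queue a random address
 * update when needed), in decreasing order of preference:
 *
 *  1. privacy enabled: a resolvable private address, regenerated after
 *     the RPA timeout expires
 *  2. privacy required but no RPA available: a fresh non-resolvable
 *     private address
 *  3. forced static address or no public address: the static random
 *     address
 *  4. otherwise: the public address
 */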
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

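	/* The LE defaults below are expressed in standard HCI units:
	 * advertising and scan parameters in 0.625 ms steps, connection
	 * interval in 1.25 ms steps and supervision timeout in 10 ms
	 * steps. So 0x0800 is 1.28 s, 0x0060 and 0x0030 are 60 ms and
	 * 30 ms, 0x0028-0x0038 is 50-70 ms and 0x002a is 420 ms.
	 */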
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
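
/* A minimal sketch of how a transport driver is expected to use the
 * registration API above; the callback names are placeholders for a
 * real driver's implementations, not functions defined in this file:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() rejects a device whose open, close or send hook
 * is missing, so all three must be wired up before the call.
 */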

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

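	/* The three bytes copied below form a complete HCI event packet:
	 * the HCI_EV_HARDWARE_ERROR event code, a parameter length of
	 * one and a hardware code of 0x00, so the upper stack sees the
	 * same Hardware Error event a controller would report itself.
	 */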
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
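
/* A minimal sketch (illustrative, not a driver in this tree) of how a
 * transport driver hands a received event packet to the core; count,
 * data and the surrounding driver context are assumptions:
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	memcpy(skb_put(skb, count), data, count);
 *	hci_recv_frame(hdev, skb);
 *
 * The core takes ownership of the skb and frees it itself on error.
 */
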
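/* Core reassembly helper: accumulate up to @count bytes of @data into
 * the reassembly buffer selected by @index, allocating the buffer on
 * the first fragment based on the packet @type. Complete frames are
 * forwarded to hci_recv_frame(); the return value is the number of
 * bytes not yet consumed, or a negative error.
 */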
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

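/* A minimal sketch of driving the request machinery above; the opcode
 * is just an example and my_req_complete is a placeholder callback:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_req_complete);
 *
 * hci_req_run() returns -ENODATA for an empty request and the recorded
 * req->err when any hci_req_add() call failed, so callers only need to
 * check the final return value.
 */
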
bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	bt_cb(skb)->opcode = opcode;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
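
/* The quota computed above splits the controller's free buffer count
 * evenly across the busy connections of one link type: with, say, 8
 * free ACL buffers and 3 connections holding queued data, each round
 * grants 8 / 3 = 2 packets per connection, with a floor of one packet
 * so that a connection is never starved outright.
 */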

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
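
/* Worked example, assuming a controller that reported a 256-byte data
 * block size: an skb carrying the 4-byte ACL header plus 1021 bytes of
 * payload gives DIV_ROUND_UP(1021, 256) = 4 blocks.
 */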

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
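
/* Packet-based flow control is the normal case for BR/EDR controllers;
 * block-based scheduling is only used on controllers (typically AMP)
 * that report a data-block budget instead of a packet budget.
 */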
5148
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005150static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151{
5152 struct hci_conn *conn;
5153 struct sk_buff *skb;
5154 int quote;
5155
5156 BT_DBG("%s", hdev->name);
5157
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005158 if (!hci_conn_num(hdev, SCO_LINK))
5159 return;
5160
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5162 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5163 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005164 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165
5166 conn->sent++;
5167 if (conn->sent == ~0)
5168 conn->sent = 0;
5169 }
5170 }
5171}
5172
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005173static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005174{
5175 struct hci_conn *conn;
5176 struct sk_buff *skb;
5177 int quote;
5178
5179 BT_DBG("%s", hdev->name);
5180
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005181 if (!hci_conn_num(hdev, ESCO_LINK))
5182 return;
5183
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005184 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5185 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005186 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5187 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005188 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005189
5190 conn->sent++;
5191 if (conn->sent == ~0)
5192 conn->sent = 0;
5193 }
5194 }
5195}
5196
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005197static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005198{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005199 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005200 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005201 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005202
5203 BT_DBG("%s", hdev->name);
5204
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005205 if (!hci_conn_num(hdev, LE_LINK))
5206 return;
5207
Marcel Holtmann4a964402014-07-02 19:10:33 +02005208 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005209 /* LE tx timeout must be longer than maximum
5210 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005211 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005212 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005213 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005214 }
5215
5216 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005217 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005218 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005219 u32 priority = (skb_peek(&chan->data_q))->priority;
5220 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005221 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005222 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005223
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005224 /* Stop if priority has changed */
5225 if (skb->priority < priority)
5226 break;
5227
5228 skb = skb_dequeue(&chan->data_q);
5229
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005230 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005231 hdev->le_last_tx = jiffies;
5232
5233 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005234 chan->sent++;
5235 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005236 }
5237 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005238
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005239 if (hdev->le_pkts)
5240 hdev->le_cnt = cnt;
5241 else
5242 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005243
5244 if (cnt != tmp)
5245 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005246}
5247
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005248static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005250 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251 struct sk_buff *skb;
5252
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005253 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005254 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255
Marcel Holtmann52de5992013-09-03 18:08:38 -07005256 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5257 /* Schedule queues and send stuff to HCI driver */
5258 hci_sched_acl(hdev);
5259 hci_sched_sco(hdev);
5260 hci_sched_esco(hdev);
5261 hci_sched_le(hdev);
5262 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005263
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 /* Send next queued raw (unknown type) packet */
5265 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005266 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267}
5268
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005269/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270
5271/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005272static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273{
5274 struct hci_acl_hdr *hdr = (void *) skb->data;
5275 struct hci_conn *conn;
5276 __u16 handle, flags;
5277
5278 skb_pull(skb, HCI_ACL_HDR_SIZE);
5279
5280 handle = __le16_to_cpu(hdr->handle);
5281 flags = hci_flags(handle);
5282 handle = hci_handle(handle);
5283
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005284 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005285 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286
5287 hdev->stat.acl_rx++;
5288
5289 hci_dev_lock(hdev);
5290 conn = hci_conn_hash_lookup_handle(hdev, handle);
5291 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005292
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005294 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005295
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005297 l2cap_recv_acldata(conn, skb, flags);
5298 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005300 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005301 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 }
5303
5304 kfree_skb(skb);
5305}
5306
5307/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005308static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309{
5310 struct hci_sco_hdr *hdr = (void *) skb->data;
5311 struct hci_conn *conn;
5312 __u16 handle;
5313
5314 skb_pull(skb, HCI_SCO_HDR_SIZE);
5315
5316 handle = __le16_to_cpu(hdr->handle);
5317
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005318 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319
5320 hdev->stat.sco_rx++;
5321
5322 hci_dev_lock(hdev);
5323 conn = hci_conn_hash_lookup_handle(hdev, handle);
5324 hci_dev_unlock(hdev);
5325
5326 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005328 sco_recv_scodata(conn, skb);
5329 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005331 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005332 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 }
5334
5335 kfree_skb(skb);
5336}
5337
Johan Hedberg9238f362013-03-05 20:37:48 +02005338static bool hci_req_is_complete(struct hci_dev *hdev)
5339{
5340 struct sk_buff *skb;
5341
5342 skb = skb_peek(&hdev->cmd_q);
5343 if (!skb)
5344 return true;
5345
5346 return bt_cb(skb)->req.start;
5347}
5348
Johan Hedberg42c6b122013-03-05 20:37:49 +02005349static void hci_resend_last(struct hci_dev *hdev)
5350{
5351 struct hci_command_hdr *sent;
5352 struct sk_buff *skb;
5353 u16 opcode;
5354
5355 if (!hdev->sent_cmd)
5356 return;
5357
5358 sent = (void *) hdev->sent_cmd->data;
5359 opcode = __le16_to_cpu(sent->opcode);
5360 if (opcode == HCI_OP_RESET)
5361 return;
5362
5363 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5364 if (!skb)
5365 return;
5366
5367 skb_queue_head(&hdev->cmd_q, skb);
5368 queue_work(hdev->workqueue, &hdev->cmd_work);
5369}
5370
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

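/* RX work: drain hdev->rx_q and dispatch each frame by packet type. A
 * copy of every frame goes to the monitor socket and, while hdev->promisc
 * is set, to the HCI sockets as well.
 */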
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

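/* CMD work: send the next queued command, but only while the controller
 * still has command credits (hdev->cmd_cnt). The sent command is kept
 * around in hdev->sent_cmd for the completion handling above, and the
 * command timer is (re)armed so an unresponsive controller is detected.
 */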
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

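/* Queue an HCI_LE_Set_Scan_Enable command that disables LE scanning.
 * Like the other request helpers below, this only appends the command
 * to the request; nothing is sent until the request is run with
 * hci_req_run().
 */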
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

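/* Queue a command that adds a single connection parameter entry to the
 * controller white list.
 */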
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

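/* Synchronize the controller white list with the pending connection and
 * pending report lists, and return the scan filter policy to use: 0x01
 * when the white list can be used, 0x00 (accept all advertising) when it
 * cannot, e.g. because it is too small or a peer uses resolvable private
 * addresses.
 */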
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

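/* Queue the commands for a passive LE background scan: set the scan
 * parameters, including the filter policy picked by update_white_list(),
 * and then enable scanning with duplicate filtering.
 */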
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses,
	 * i.e. LE privacy is enabled, controllers that support the
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
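/* A minimal usage sketch (hypothetical caller), reflecting the locking
 * requirement stated above:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */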
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * take care of setting proper values for the RSSI threshold
	 * and UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}

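/* Return true if any BR/EDR white list entry currently lacks an active
 * connection, in which case page scanning needs to stay enabled so that
 * such a device can reconnect.
 */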
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

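/* Update the BR/EDR page scan (and inquiry scan) setting based on the
 * connectable and discoverable states. When a request is given, the scan
 * enable command is queued on it; otherwise it is sent immediately.
 */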
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}