blob: f0018562b028170247f036a7b190368e31d08ea2 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
/* Deferred work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
/* Forward a device event notification to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
Marcel Holtmann111902f2014-06-21 04:53:17 +0200107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
Marcel Holtmann111902f2014-06-21 04:53:17 +0200128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700129
130 return count;
131}
132
133static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
138};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Marcel Holtmann47219832013-10-17 17:24:15 -0700203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700210 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700211
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700218
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700219 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700238static int inquiry_cache_show(struct seq_file *f, void *p)
239{
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
243
244 hci_dev_lock(hdev);
245
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
255 }
256
257 hci_dev_unlock(hdev);
258
259 return 0;
260}
261
262static int inquiry_cache_open(struct inode *inode, struct file *file)
263{
264 return single_open(file, inquiry_cache_show, inode->i_private);
265}
266
267static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
272};
273
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700274static int link_keys_show(struct seq_file *f, void *ptr)
275{
276 struct hci_dev *hdev = f->private;
Johan Hedberg0378b592014-11-19 15:22:22 +0200277 struct link_key *key;
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700278
Johan Hedberg0378b592014-11-19 15:22:22 +0200279 rcu_read_lock();
280 list_for_each_entry_rcu(key, &hdev->link_keys, list)
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700281 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
Johan Hedberg0378b592014-11-19 15:22:22 +0200283 rcu_read_unlock();
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700284
285 return 0;
286}
287
288static int link_keys_open(struct inode *inode, struct file *file)
289{
290 return single_open(file, link_keys_show, inode->i_private);
291}
292
293static const struct file_operations link_keys_fops = {
294 .open = link_keys_open,
295 .read = seq_read,
296 .llseek = seq_lseek,
297 .release = single_release,
298};
299
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700300static int dev_class_show(struct seq_file *f, void *ptr)
301{
302 struct hci_dev *hdev = f->private;
303
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
308
309 return 0;
310}
311
312static int dev_class_open(struct inode *inode, struct file *file)
313{
314 return single_open(file, dev_class_show, inode->i_private);
315}
316
317static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
322};
323
Marcel Holtmann041000b2013-10-17 12:02:31 -0700324static int voice_setting_get(void *data, u64 *val)
325{
326 struct hci_dev *hdev = data;
327
328 hci_dev_lock(hdev);
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
331
332 return 0;
333}
334
335DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
337
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700338static int auto_accept_delay_set(void *data, u64 val)
339{
340 struct hci_dev *hdev = data;
341
342 hci_dev_lock(hdev);
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
345
346 return 0;
347}
348
349static int auto_accept_delay_get(void *data, u64 *val)
350{
351 struct hci_dev *hdev = data;
352
353 hci_dev_lock(hdev);
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
356
357 return 0;
358}
359
360DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
362
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800363static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364 size_t count, loff_t *ppos)
365{
366 struct hci_dev *hdev = file->private_data;
367 char buf[3];
368
Marcel Holtmann111902f2014-06-21 04:53:17 +0200369 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800370 buf[1] = '\n';
371 buf[2] = '\0';
372 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
373}
374
375static ssize_t force_sc_support_write(struct file *file,
376 const char __user *user_buf,
377 size_t count, loff_t *ppos)
378{
379 struct hci_dev *hdev = file->private_data;
380 char buf[32];
381 size_t buf_size = min(count, (sizeof(buf)-1));
382 bool enable;
383
384 if (test_bit(HCI_UP, &hdev->flags))
385 return -EBUSY;
386
387 if (copy_from_user(buf, user_buf, buf_size))
388 return -EFAULT;
389
390 buf[buf_size] = '\0';
391 if (strtobool(buf, &enable))
392 return -EINVAL;
393
Marcel Holtmann111902f2014-06-21 04:53:17 +0200394 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800395 return -EALREADY;
396
Marcel Holtmann111902f2014-06-21 04:53:17 +0200397 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800398
399 return count;
400}
401
402static const struct file_operations force_sc_support_fops = {
403 .open = simple_open,
404 .read = force_sc_support_read,
405 .write = force_sc_support_write,
406 .llseek = default_llseek,
407};
408
Johan Hedberg858cdc72014-10-16 10:45:31 +0200409static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
410 size_t count, loff_t *ppos)
411{
412 struct hci_dev *hdev = file->private_data;
413 char buf[3];
414
415 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
416 buf[1] = '\n';
417 buf[2] = '\0';
418 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
419}
420
421static ssize_t force_lesc_support_write(struct file *file,
422 const char __user *user_buf,
423 size_t count, loff_t *ppos)
424{
425 struct hci_dev *hdev = file->private_data;
426 char buf[32];
427 size_t buf_size = min(count, (sizeof(buf)-1));
428 bool enable;
429
430 if (copy_from_user(buf, user_buf, buf_size))
431 return -EFAULT;
432
433 buf[buf_size] = '\0';
434 if (strtobool(buf, &enable))
435 return -EINVAL;
436
437 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
438 return -EALREADY;
439
440 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
441
442 return count;
443}
444
445static const struct file_operations force_lesc_support_fops = {
446 .open = simple_open,
447 .read = force_lesc_support_read,
448 .write = force_lesc_support_write,
449 .llseek = default_llseek,
450};
451
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800452static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
453 size_t count, loff_t *ppos)
454{
455 struct hci_dev *hdev = file->private_data;
456 char buf[3];
457
458 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
459 buf[1] = '\n';
460 buf[2] = '\0';
461 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
462}
463
464static const struct file_operations sc_only_mode_fops = {
465 .open = simple_open,
466 .read = sc_only_mode_read,
467 .llseek = default_llseek,
468};
469
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700470static int idle_timeout_set(void *data, u64 val)
471{
472 struct hci_dev *hdev = data;
473
474 if (val != 0 && (val < 500 || val > 3600000))
475 return -EINVAL;
476
477 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700478 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700479 hci_dev_unlock(hdev);
480
481 return 0;
482}
483
484static int idle_timeout_get(void *data, u64 *val)
485{
486 struct hci_dev *hdev = data;
487
488 hci_dev_lock(hdev);
489 *val = hdev->idle_timeout;
490 hci_dev_unlock(hdev);
491
492 return 0;
493}
494
495DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
496 idle_timeout_set, "%llu\n");
497
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200498static int rpa_timeout_set(void *data, u64 val)
499{
500 struct hci_dev *hdev = data;
501
502 /* Require the RPA timeout to be at least 30 seconds and at most
503 * 24 hours.
504 */
505 if (val < 30 || val > (60 * 60 * 24))
506 return -EINVAL;
507
508 hci_dev_lock(hdev);
509 hdev->rpa_timeout = val;
510 hci_dev_unlock(hdev);
511
512 return 0;
513}
514
515static int rpa_timeout_get(void *data, u64 *val)
516{
517 struct hci_dev *hdev = data;
518
519 hci_dev_lock(hdev);
520 *val = hdev->rpa_timeout;
521 hci_dev_unlock(hdev);
522
523 return 0;
524}
525
526DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
527 rpa_timeout_set, "%llu\n");
528
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700529static int sniff_min_interval_set(void *data, u64 val)
530{
531 struct hci_dev *hdev = data;
532
533 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
534 return -EINVAL;
535
536 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700537 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700538 hci_dev_unlock(hdev);
539
540 return 0;
541}
542
543static int sniff_min_interval_get(void *data, u64 *val)
544{
545 struct hci_dev *hdev = data;
546
547 hci_dev_lock(hdev);
548 *val = hdev->sniff_min_interval;
549 hci_dev_unlock(hdev);
550
551 return 0;
552}
553
554DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
555 sniff_min_interval_set, "%llu\n");
556
557static int sniff_max_interval_set(void *data, u64 val)
558{
559 struct hci_dev *hdev = data;
560
561 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
562 return -EINVAL;
563
564 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700565 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700566 hci_dev_unlock(hdev);
567
568 return 0;
569}
570
571static int sniff_max_interval_get(void *data, u64 *val)
572{
573 struct hci_dev *hdev = data;
574
575 hci_dev_lock(hdev);
576 *val = hdev->sniff_max_interval;
577 hci_dev_unlock(hdev);
578
579 return 0;
580}
581
582DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
583 sniff_max_interval_set, "%llu\n");
584
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200585static int conn_info_min_age_set(void *data, u64 val)
586{
587 struct hci_dev *hdev = data;
588
589 if (val == 0 || val > hdev->conn_info_max_age)
590 return -EINVAL;
591
592 hci_dev_lock(hdev);
593 hdev->conn_info_min_age = val;
594 hci_dev_unlock(hdev);
595
596 return 0;
597}
598
599static int conn_info_min_age_get(void *data, u64 *val)
600{
601 struct hci_dev *hdev = data;
602
603 hci_dev_lock(hdev);
604 *val = hdev->conn_info_min_age;
605 hci_dev_unlock(hdev);
606
607 return 0;
608}
609
610DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
611 conn_info_min_age_set, "%llu\n");
612
613static int conn_info_max_age_set(void *data, u64 val)
614{
615 struct hci_dev *hdev = data;
616
617 if (val == 0 || val < hdev->conn_info_min_age)
618 return -EINVAL;
619
620 hci_dev_lock(hdev);
621 hdev->conn_info_max_age = val;
622 hci_dev_unlock(hdev);
623
624 return 0;
625}
626
627static int conn_info_max_age_get(void *data, u64 *val)
628{
629 struct hci_dev *hdev = data;
630
631 hci_dev_lock(hdev);
632 *val = hdev->conn_info_max_age;
633 hci_dev_unlock(hdev);
634
635 return 0;
636}
637
638DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
639 conn_info_max_age_set, "%llu\n");
640
Marcel Holtmannac345812014-02-23 12:44:25 -0800641static int identity_show(struct seq_file *f, void *p)
642{
643 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200644 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800645 u8 addr_type;
646
647 hci_dev_lock(hdev);
648
Johan Hedberga1f4c312014-02-27 14:05:41 +0200649 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800650
Johan Hedberga1f4c312014-02-27 14:05:41 +0200651 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800652 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800653
654 hci_dev_unlock(hdev);
655
656 return 0;
657}
658
659static int identity_open(struct inode *inode, struct file *file)
660{
661 return single_open(file, identity_show, inode->i_private);
662}
663
664static const struct file_operations identity_fops = {
665 .open = identity_open,
666 .read = seq_read,
667 .llseek = seq_lseek,
668 .release = single_release,
669};
670
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800671static int random_address_show(struct seq_file *f, void *p)
672{
673 struct hci_dev *hdev = f->private;
674
675 hci_dev_lock(hdev);
676 seq_printf(f, "%pMR\n", &hdev->random_addr);
677 hci_dev_unlock(hdev);
678
679 return 0;
680}
681
682static int random_address_open(struct inode *inode, struct file *file)
683{
684 return single_open(file, random_address_show, inode->i_private);
685}
686
687static const struct file_operations random_address_fops = {
688 .open = random_address_open,
689 .read = seq_read,
690 .llseek = seq_lseek,
691 .release = single_release,
692};
693
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700694static int static_address_show(struct seq_file *f, void *p)
695{
696 struct hci_dev *hdev = f->private;
697
698 hci_dev_lock(hdev);
699 seq_printf(f, "%pMR\n", &hdev->static_addr);
700 hci_dev_unlock(hdev);
701
702 return 0;
703}
704
705static int static_address_open(struct inode *inode, struct file *file)
706{
707 return single_open(file, static_address_show, inode->i_private);
708}
709
710static const struct file_operations static_address_fops = {
711 .open = static_address_open,
712 .read = seq_read,
713 .llseek = seq_lseek,
714 .release = single_release,
715};
716
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800717static ssize_t force_static_address_read(struct file *file,
718 char __user *user_buf,
719 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700720{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800721 struct hci_dev *hdev = file->private_data;
722 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700723
Marcel Holtmann111902f2014-06-21 04:53:17 +0200724 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800725 buf[1] = '\n';
726 buf[2] = '\0';
727 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
728}
729
730static ssize_t force_static_address_write(struct file *file,
731 const char __user *user_buf,
732 size_t count, loff_t *ppos)
733{
734 struct hci_dev *hdev = file->private_data;
735 char buf[32];
736 size_t buf_size = min(count, (sizeof(buf)-1));
737 bool enable;
738
739 if (test_bit(HCI_UP, &hdev->flags))
740 return -EBUSY;
741
742 if (copy_from_user(buf, user_buf, buf_size))
743 return -EFAULT;
744
745 buf[buf_size] = '\0';
746 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700747 return -EINVAL;
748
Marcel Holtmann111902f2014-06-21 04:53:17 +0200749 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800750 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700751
Marcel Holtmann111902f2014-06-21 04:53:17 +0200752 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800753
754 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700755}
756
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800757static const struct file_operations force_static_address_fops = {
758 .open = simple_open,
759 .read = force_static_address_read,
760 .write = force_static_address_write,
761 .llseek = default_llseek,
762};
Marcel Holtmann92202182013-10-18 16:38:10 -0700763
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800764static int white_list_show(struct seq_file *f, void *ptr)
765{
766 struct hci_dev *hdev = f->private;
767 struct bdaddr_list *b;
768
769 hci_dev_lock(hdev);
770 list_for_each_entry(b, &hdev->le_white_list, list)
771 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
772 hci_dev_unlock(hdev);
773
774 return 0;
775}
776
777static int white_list_open(struct inode *inode, struct file *file)
778{
779 return single_open(file, white_list_show, inode->i_private);
780}
781
782static const struct file_operations white_list_fops = {
783 .open = white_list_open,
784 .read = seq_read,
785 .llseek = seq_lseek,
786 .release = single_release,
787};
788
Marcel Holtmann3698d702014-02-18 21:54:49 -0800789static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
790{
791 struct hci_dev *hdev = f->private;
Johan Hedbergadae20c2014-11-13 14:37:48 +0200792 struct smp_irk *irk;
Marcel Holtmann3698d702014-02-18 21:54:49 -0800793
Johan Hedbergadae20c2014-11-13 14:37:48 +0200794 rcu_read_lock();
795 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Marcel Holtmann3698d702014-02-18 21:54:49 -0800796 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
797 &irk->bdaddr, irk->addr_type,
798 16, irk->val, &irk->rpa);
799 }
Johan Hedbergadae20c2014-11-13 14:37:48 +0200800 rcu_read_unlock();
Marcel Holtmann3698d702014-02-18 21:54:49 -0800801
802 return 0;
803}
804
805static int identity_resolving_keys_open(struct inode *inode, struct file *file)
806{
807 return single_open(file, identity_resolving_keys_show,
808 inode->i_private);
809}
810
811static const struct file_operations identity_resolving_keys_fops = {
812 .open = identity_resolving_keys_open,
813 .read = seq_read,
814 .llseek = seq_lseek,
815 .release = single_release,
816};
817
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700818static int long_term_keys_show(struct seq_file *f, void *ptr)
819{
820 struct hci_dev *hdev = f->private;
Johan Hedberg970d0f12014-11-13 14:37:47 +0200821 struct smp_ltk *ltk;
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700822
Johan Hedberg970d0f12014-11-13 14:37:47 +0200823 rcu_read_lock();
824 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800828 __le64_to_cpu(ltk->rand), 16, ltk->val);
Johan Hedberg970d0f12014-11-13 14:37:47 +0200829 rcu_read_unlock();
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700830
831 return 0;
832}
833
834static int long_term_keys_open(struct inode *inode, struct file *file)
835{
836 return single_open(file, long_term_keys_show, inode->i_private);
837}
838
839static const struct file_operations long_term_keys_fops = {
840 .open = long_term_keys_open,
841 .read = seq_read,
842 .llseek = seq_lseek,
843 .release = single_release,
844};
845
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700846static int conn_min_interval_set(void *data, u64 val)
847{
848 struct hci_dev *hdev = data;
849
850 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
851 return -EINVAL;
852
853 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700854 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700855 hci_dev_unlock(hdev);
856
857 return 0;
858}
859
860static int conn_min_interval_get(void *data, u64 *val)
861{
862 struct hci_dev *hdev = data;
863
864 hci_dev_lock(hdev);
865 *val = hdev->le_conn_min_interval;
866 hci_dev_unlock(hdev);
867
868 return 0;
869}
870
871DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
872 conn_min_interval_set, "%llu\n");
873
874static int conn_max_interval_set(void *data, u64 val)
875{
876 struct hci_dev *hdev = data;
877
878 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
879 return -EINVAL;
880
881 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700882 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700883 hci_dev_unlock(hdev);
884
885 return 0;
886}
887
888static int conn_max_interval_get(void *data, u64 *val)
889{
890 struct hci_dev *hdev = data;
891
892 hci_dev_lock(hdev);
893 *val = hdev->le_conn_max_interval;
894 hci_dev_unlock(hdev);
895
896 return 0;
897}
898
899DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
900 conn_max_interval_set, "%llu\n");
901
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200902static int conn_latency_set(void *data, u64 val)
903{
904 struct hci_dev *hdev = data;
905
906 if (val > 0x01f3)
907 return -EINVAL;
908
909 hci_dev_lock(hdev);
910 hdev->le_conn_latency = val;
911 hci_dev_unlock(hdev);
912
913 return 0;
914}
915
916static int conn_latency_get(void *data, u64 *val)
917{
918 struct hci_dev *hdev = data;
919
920 hci_dev_lock(hdev);
921 *val = hdev->le_conn_latency;
922 hci_dev_unlock(hdev);
923
924 return 0;
925}
926
927DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
928 conn_latency_set, "%llu\n");
929
Marcel Holtmannf1649572014-06-30 12:34:38 +0200930static int supervision_timeout_set(void *data, u64 val)
931{
932 struct hci_dev *hdev = data;
933
934 if (val < 0x000a || val > 0x0c80)
935 return -EINVAL;
936
937 hci_dev_lock(hdev);
938 hdev->le_supv_timeout = val;
939 hci_dev_unlock(hdev);
940
941 return 0;
942}
943
944static int supervision_timeout_get(void *data, u64 *val)
945{
946 struct hci_dev *hdev = data;
947
948 hci_dev_lock(hdev);
949 *val = hdev->le_supv_timeout;
950 hci_dev_unlock(hdev);
951
952 return 0;
953}
954
955DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
956 supervision_timeout_set, "%llu\n");
957
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800958static int adv_channel_map_set(void *data, u64 val)
959{
960 struct hci_dev *hdev = data;
961
962 if (val < 0x01 || val > 0x07)
963 return -EINVAL;
964
965 hci_dev_lock(hdev);
966 hdev->le_adv_channel_map = val;
967 hci_dev_unlock(hdev);
968
969 return 0;
970}
971
972static int adv_channel_map_get(void *data, u64 *val)
973{
974 struct hci_dev *hdev = data;
975
976 hci_dev_lock(hdev);
977 *val = hdev->le_adv_channel_map;
978 hci_dev_unlock(hdev);
979
980 return 0;
981}
982
983DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
984 adv_channel_map_set, "%llu\n");
985
Georg Lukas729a1052014-07-26 13:59:58 +0200986static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200987{
Georg Lukas729a1052014-07-26 13:59:58 +0200988 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200989
Georg Lukas729a1052014-07-26 13:59:58 +0200990 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200991 return -EINVAL;
992
Andre Guedes7d474e02014-02-26 20:21:54 -0300993 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200994 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300995 hci_dev_unlock(hdev);
996
997 return 0;
998}
999
Georg Lukas729a1052014-07-26 13:59:58 +02001000static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001001{
Georg Lukas729a1052014-07-26 13:59:58 +02001002 struct hci_dev *hdev = data;
1003
1004 hci_dev_lock(hdev);
1005 *val = hdev->le_adv_min_interval;
1006 hci_dev_unlock(hdev);
1007
1008 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001009}
1010
Georg Lukas729a1052014-07-26 13:59:58 +02001011DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1012 adv_min_interval_set, "%llu\n");
1013
1014static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001015{
Georg Lukas729a1052014-07-26 13:59:58 +02001016 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001017
Georg Lukas729a1052014-07-26 13:59:58 +02001018 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -03001019 return -EINVAL;
1020
Georg Lukas729a1052014-07-26 13:59:58 +02001021 hci_dev_lock(hdev);
1022 hdev->le_adv_max_interval = val;
1023 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001024
Georg Lukas729a1052014-07-26 13:59:58 +02001025 return 0;
1026}
Andre Guedes7d474e02014-02-26 20:21:54 -03001027
Georg Lukas729a1052014-07-26 13:59:58 +02001028static int adv_max_interval_get(void *data, u64 *val)
1029{
1030 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001031
Georg Lukas729a1052014-07-26 13:59:58 +02001032 hci_dev_lock(hdev);
1033 *val = hdev->le_adv_max_interval;
1034 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001035
Georg Lukas729a1052014-07-26 13:59:58 +02001036 return 0;
1037}
Andre Guedes7d474e02014-02-26 20:21:54 -03001038
Georg Lukas729a1052014-07-26 13:59:58 +02001039DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1040 adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001041
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001042static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -03001043{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001044 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -03001045 struct hci_conn_params *p;
Marcel Holtmann40f49382014-11-02 21:46:52 +01001046 struct bdaddr_list *b;
Andre Guedes7d474e02014-02-26 20:21:54 -03001047
Andre Guedes7d474e02014-02-26 20:21:54 -03001048 hci_dev_lock(hdev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001049 list_for_each_entry(b, &hdev->whitelist, list)
1050 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Andre Guedes7d474e02014-02-26 20:21:54 -03001051 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann40f49382014-11-02 21:46:52 +01001052 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -03001053 p->auto_connect);
Andre Guedes7d474e02014-02-26 20:21:54 -03001054 }
Andre Guedes7d474e02014-02-26 20:21:54 -03001055 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001056
Andre Guedes7d474e02014-02-26 20:21:54 -03001057 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001058}
1059
/* debugfs open hook: bind device_list_show() to this seq_file. */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

/* Read-only seq_file operations for the "device_list" debugfs entry. */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1071
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072/* ---- HCI requests ---- */
1073
Johan Hedberg42c6b122013-03-05 20:37:49 +02001074static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001076 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077
1078 if (hdev->req_status == HCI_REQ_PEND) {
1079 hdev->req_result = result;
1080 hdev->req_status = HCI_REQ_DONE;
1081 wake_up_interruptible(&hdev->req_wait_q);
1082 }
1083}
1084
1085static void hci_req_cancel(struct hci_dev *hdev, int err)
1086{
1087 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1088
1089 if (hdev->req_status == HCI_REQ_PEND) {
1090 hdev->req_result = err;
1091 hdev->req_status = HCI_REQ_CANCELED;
1092 wake_up_interruptible(&hdev->req_wait_q);
1093 }
1094}
1095
/* Take ownership of the last received event (hdev->recv_evt) and
 * return it if it matches what the caller waited for.
 *
 * If @event is non-zero the skb is returned only when its event code
 * equals @event. Otherwise the event must be a Command Complete whose
 * opcode matches @opcode. On any mismatch or malformed event the skb
 * is freed and ERR_PTR(-ENODATA) is returned.
 *
 * The returned skb has its headers pulled past the event (and, for
 * Command Complete, past the ev_cmd_complete structure); the caller
 * owns it and must kfree_skb() it.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stored event from hdev under the lock; from here
	 * on this function owns the skb.
	 */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event code: match on that alone. */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1150
/* Send a single HCI command and sleep until it completes, a signal
 * arrives, or @timeout (in jiffies) expires.
 *
 * @event selects the event that terminates the wait: 0 means the usual
 * Command Complete for @opcode, otherwise the specific event code.
 * Returns the matching event skb (caller frees) or an ERR_PTR.
 *
 * Note the ordering: the task is put on req_wait_q and marked
 * TASK_INTERRUPTIBLE *before* hci_req_run() so that a completion that
 * fires immediately cannot be lost between queuing and sleeping.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		/* Nothing was queued: undo the wait setup and bail out. */
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Map the request outcome to an errno. A status still at
	 * HCI_REQ_PEND here means the wait timed out.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1207
1208struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001209 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001210{
1211 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001212}
1213EXPORT_SYMBOL(__hci_cmd_sync);
1214
/* Execute request and wait for completion. */

/* Build an HCI request via @func, run it, and sleep until it completes,
 * a signal arrives, or @timeout (jiffies) expires. Caller must hold
 * hci_req_lock (see hci_req_sync()).
 *
 * Returns 0 on success or a negative errno. An empty request (@func
 * queued nothing, hci_req_run() returns -ENODATA) is treated as
 * success.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	/* Queue ourselves on the wait queue before running the request
	 * so an immediate completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* A status still at HCI_REQ_PEND here means the wait timed out. */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1281
Johan Hedberg01178cd2013-03-05 20:37:41 +02001282static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001283 void (*req)(struct hci_request *req,
1284 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001285 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286{
1287 int ret;
1288
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001289 if (!test_bit(HCI_UP, &hdev->flags))
1290 return -ENETDOWN;
1291
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 /* Serialize all requests */
1293 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001294 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 hci_req_unlock(hdev);
1296
1297 return ret;
1298}
1299
Johan Hedberg42c6b122013-03-05 20:37:49 +02001300static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001302 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
1304 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307}
1308
Johan Hedberg42c6b122013-03-05 20:37:49 +02001309static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001312
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001316 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001318
1319 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321}
1322
/* Stage-one init for AMP controllers: select block-based flow control
 * and queue the AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1348
Johan Hedberg42c6b122013-03-05 20:37:49 +02001349static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001350{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001351 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001352
1353 BT_DBG("%s %ld", hdev->name, opt);
1354
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001355 /* Reset */
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001357 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001358
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001359 switch (hdev->dev_type) {
1360 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001361 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001362 break;
1363
1364 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001365 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001366 break;
1367
1368 default:
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1370 break;
1371 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001372}
1373
/* Stage-two setup for BR/EDR capable controllers: read basic settings,
 * clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1415
/* Stage-two setup for LE capable controllers: read LE capabilities and
 * start from a clean white list.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1439
1440static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1441{
1442 if (lmp_ext_inq_capable(hdev))
1443 return 0x02;
1444
1445 if (lmp_inq_rssi_capable(hdev))
1446 return 0x01;
1447
1448 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1449 hdev->lmp_subver == 0x0757)
1450 return 0x01;
1451
1452 if (hdev->manufacturer == 15) {
1453 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1454 return 0x01;
1455 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1456 return 0x01;
1457 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1458 return 0x01;
1459 }
1460
1461 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1462 hdev->lmp_subver == 0x1805)
1463 return 0x01;
1464
1465 return 0x00;
1466}
1467
Johan Hedberg42c6b122013-03-05 20:37:49 +02001468static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001469{
1470 u8 mode;
1471
Johan Hedberg42c6b122013-03-05 20:37:49 +02001472 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001473
Johan Hedberg42c6b122013-03-05 20:37:49 +02001474 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001475}
1476
/* Build and queue the HCI Set Event Mask, enabling only events that
 * match the controller's capabilities (LMP feature bits).
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1553
/* Second init stage: transport-specific setup plus feature-dependent
 * commands (SSP/EIR, inquiry mode, extended features, auth enable).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP not enabled by the host: clear the cached
			 * EIR data and send an all-zero Write EIR.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		/* Fetch page 1 of the extended features. */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1615
Johan Hedberg42c6b122013-03-05 20:37:49 +02001616static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001617{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001618 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001619 struct hci_cp_write_def_link_policy cp;
1620 u16 link_policy = 0;
1621
1622 if (lmp_rswitch_capable(hdev))
1623 link_policy |= HCI_LP_RSWITCH;
1624 if (lmp_hold_capable(hdev))
1625 link_policy |= HCI_LP_HOLD;
1626 if (lmp_sniff_capable(hdev))
1627 link_policy |= HCI_LP_SNIFF;
1628 if (lmp_park_capable(hdev))
1629 link_policy |= HCI_LP_PARK;
1630
1631 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001632 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633}
1634
Johan Hedberg42c6b122013-03-05 20:37:49 +02001635static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001636{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001637 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001638 struct hci_cp_write_le_host_supported cp;
1639
Johan Hedbergc73eee92013-04-19 18:35:21 +03001640 /* LE-only devices do not support explicit enablement */
1641 if (!lmp_bredr_capable(hdev))
1642 return;
1643
Johan Hedberg2177bab2013-03-05 20:37:43 +02001644 memset(&cp, 0, sizeof(cp));
1645
1646 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1647 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001648 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001649 }
1650
1651 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001652 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1653 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001654}
1655
/* Build and queue Set Event Mask Page 2 based on Connectionless Slave
 * Broadcast and secure-ping related capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1687
Johan Hedberg42c6b122013-03-05 20:37:49 +02001688static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001689{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001690 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001691 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001692
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001693 hci_setup_event_mask(req);
1694
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001695 /* Some Broadcom based Bluetooth controllers do not support the
1696 * Delete Stored Link Key command. They are clearly indicating its
1697 * absence in the bit mask of supported commands.
1698 *
1699 * Check the supported commands and only if the the command is marked
1700 * as supported send it. If not supported assume that the controller
1701 * does not have actual support for stored link keys which makes this
1702 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001703 *
1704 * Some controllers indicate that they support handling deleting
1705 * stored link keys, but they don't. The quirk lets a driver
1706 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001707 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001708 if (hdev->commands[6] & 0x80 &&
1709 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001710 struct hci_cp_delete_stored_link_key cp;
1711
1712 bacpy(&cp.bdaddr, BDADDR_ANY);
1713 cp.delete_all = 0x01;
1714 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715 sizeof(cp), &cp);
1716 }
1717
Johan Hedberg2177bab2013-03-05 20:37:43 +02001718 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001719 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001720
Andre Guedes9193c6e2014-07-01 18:10:09 -03001721 if (lmp_le_capable(hdev)) {
1722 u8 events[8];
1723
1724 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001725 events[0] = 0x0f;
1726
1727 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001729
1730 /* If controller supports the Connection Parameters Request
1731 * Link Layer Procedure, enable the corresponding event.
1732 */
1733 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734 events[0] |= 0x20; /* LE Remote Connection
1735 * Parameter Request
1736 */
1737
Andre Guedes9193c6e2014-07-01 18:10:09 -03001738 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1739 events);
1740
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001741 if (hdev->commands[25] & 0x40) {
1742 /* Read LE Advertising Channel TX Power */
1743 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1744 }
1745
Johan Hedberg42c6b122013-03-05 20:37:49 +02001746 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001747 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001748
1749 /* Read features beyond page 1 if available */
1750 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1751 struct hci_cp_read_local_ext_features cp;
1752
1753 cp.page = p;
1754 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1755 sizeof(cp), &cp);
1756 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001757}
1758
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001759static void hci_init4_req(struct hci_request *req, unsigned long opt)
1760{
1761 struct hci_dev *hdev = req->hdev;
1762
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001763 /* Set event mask page 2 if the HCI command for it is supported */
1764 if (hdev->commands[22] & 0x04)
1765 hci_set_event_mask_page_2(req);
1766
Marcel Holtmann109e3192014-07-23 19:24:56 +02001767 /* Read local codec list if the HCI command is supported */
1768 if (hdev->commands[29] & 0x20)
1769 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1770
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001771 /* Get MWS transport configuration if the HCI command is supported */
1772 if (hdev->commands[30] & 0x08)
1773 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1774
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001775 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001776 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001777 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001778
1779 /* Enable Secure Connections if supported and configured */
Johan Hedberg710f11c2014-05-26 11:21:22 +03001780 if (bredr_sc_enabled(hdev)) {
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001781 u8 support = 0x01;
1782 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1783 sizeof(support), &support);
1784 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001785}
1786
Johan Hedberg2177bab2013-03-05 20:37:43 +02001787static int __hci_init(struct hci_dev *hdev)
1788{
1789 int err;
1790
1791 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1792 if (err < 0)
1793 return err;
1794
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001795 /* The Device Under Test (DUT) mode is special and available for
1796 * all controller types. So just create it early on.
1797 */
1798 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1799 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1800 &dut_mode_fops);
1801 }
1802
Johan Hedberg2177bab2013-03-05 20:37:43 +02001803 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1804 * BR/EDR/LE type controllers. AMP controllers only need the
1805 * first stage init.
1806 */
1807 if (hdev->dev_type != HCI_BREDR)
1808 return 0;
1809
1810 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1811 if (err < 0)
1812 return err;
1813
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001814 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1815 if (err < 0)
1816 return err;
1817
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001818 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1819 if (err < 0)
1820 return err;
1821
1822 /* Only create debugfs entries during the initial setup
1823 * phase and not every time the controller gets powered on.
1824 */
1825 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1826 return 0;
1827
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001828 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1829 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001830 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1831 &hdev->manufacturer);
1832 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1833 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001834 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1835 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001836 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1837 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001838 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1839
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001840 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1841 &conn_info_min_age_fops);
1842 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1843 &conn_info_max_age_fops);
1844
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001845 if (lmp_bredr_capable(hdev)) {
1846 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1847 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001848 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1849 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001850 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1851 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001852 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1853 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001854 }
1855
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001856 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001857 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1858 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001859 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1860 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001861 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1862 hdev, &sc_only_mode_fops);
Johan Hedberg858cdc72014-10-16 10:45:31 +02001863 if (lmp_le_capable(hdev))
1864 debugfs_create_file("force_lesc_support", 0644,
1865 hdev->debugfs, hdev,
1866 &force_lesc_support_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001867 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001868
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001869 if (lmp_sniff_capable(hdev)) {
1870 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1871 hdev, &idle_timeout_fops);
1872 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1873 hdev, &sniff_min_interval_fops);
1874 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1875 hdev, &sniff_max_interval_fops);
1876 }
1877
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001878 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001879 debugfs_create_file("identity", 0400, hdev->debugfs,
1880 hdev, &identity_fops);
1881 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1882 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001883 debugfs_create_file("random_address", 0444, hdev->debugfs,
1884 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001885 debugfs_create_file("static_address", 0444, hdev->debugfs,
1886 hdev, &static_address_fops);
1887
1888 /* For controllers with a public address, provide a debug
1889 * option to force the usage of the configured static
1890 * address. By default the public address is used.
1891 */
1892 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1893 debugfs_create_file("force_static_address", 0644,
1894 hdev->debugfs, hdev,
1895 &force_static_address_fops);
1896
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001897 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1898 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001899 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1900 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001901 debugfs_create_file("identity_resolving_keys", 0400,
1902 hdev->debugfs, hdev,
1903 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001904 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1905 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001906 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1907 hdev, &conn_min_interval_fops);
1908 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1909 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001910 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1911 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001912 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1913 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001914 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1915 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001916 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1917 hdev, &adv_min_interval_fops);
1918 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1919 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001920 debugfs_create_u16("discov_interleaved_timeout", 0644,
1921 hdev->debugfs,
1922 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001923
Johan Hedberg711eafe2014-08-08 09:32:52 +03001924 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001925 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001926
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001927 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001928}
1929
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001930static void hci_init0_req(struct hci_request *req, unsigned long opt)
1931{
1932 struct hci_dev *hdev = req->hdev;
1933
1934 BT_DBG("%s %ld", hdev->name, opt);
1935
1936 /* Reset */
1937 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1938 hci_reset_req(req, 0);
1939
1940 /* Read Local Version */
1941 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1942
1943 /* Read BD Address */
1944 if (hdev->set_bdaddr)
1945 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1946}
1947
1948static int __hci_unconf_init(struct hci_dev *hdev)
1949{
1950 int err;
1951
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001952 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1953 return 0;
1954
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001955 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1956 if (err < 0)
1957 return err;
1958
1959 return 0;
1960}
1961
Johan Hedberg42c6b122013-03-05 20:37:49 +02001962static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
1964 __u8 scan = opt;
1965
Johan Hedberg42c6b122013-03-05 20:37:49 +02001966 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001969 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970}
1971
Johan Hedberg42c6b122013-03-05 20:37:49 +02001972static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
1974 __u8 auth = opt;
1975
Johan Hedberg42c6b122013-03-05 20:37:49 +02001976 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001979 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980}
1981
Johan Hedberg42c6b122013-03-05 20:37:49 +02001982static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983{
1984 __u8 encrypt = opt;
1985
Johan Hedberg42c6b122013-03-05 20:37:49 +02001986 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001988 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001989 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990}
1991
Johan Hedberg42c6b122013-03-05 20:37:49 +02001992static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001993{
1994 __le16 policy = cpu_to_le16(opt);
1995
Johan Hedberg42c6b122013-03-05 20:37:49 +02001996 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001997
1998 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001999 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002000}
2001
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002002/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 * Device is held on return. */
2004struct hci_dev *hci_dev_get(int index)
2005{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002006 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
2008 BT_DBG("%d", index);
2009
2010 if (index < 0)
2011 return NULL;
2012
2013 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002014 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 if (d->id == index) {
2016 hdev = hci_dev_hold(d);
2017 break;
2018 }
2019 }
2020 read_unlock(&hci_dev_list_lock);
2021 return hdev;
2022}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
2024/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002025
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002026bool hci_discovery_active(struct hci_dev *hdev)
2027{
2028 struct discovery_state *discov = &hdev->discovery;
2029
Andre Guedes6fbe1952012-02-03 17:47:58 -03002030 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002031 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002032 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002033 return true;
2034
Andre Guedes6fbe1952012-02-03 17:47:58 -03002035 default:
2036 return false;
2037 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002038}
2039
Johan Hedbergff9ef572012-01-04 14:23:45 +02002040void hci_discovery_set_state(struct hci_dev *hdev, int state)
2041{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002042 int old_state = hdev->discovery.state;
2043
Johan Hedbergff9ef572012-01-04 14:23:45 +02002044 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2045
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002046 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002047 return;
2048
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002049 hdev->discovery.state = state;
2050
Johan Hedbergff9ef572012-01-04 14:23:45 +02002051 switch (state) {
2052 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002053 hci_update_background_scan(hdev);
2054
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002055 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002056 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002057 break;
2058 case DISCOVERY_STARTING:
2059 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002060 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002061 mgmt_discovering(hdev, 1);
2062 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002063 case DISCOVERY_RESOLVING:
2064 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002065 case DISCOVERY_STOPPING:
2066 break;
2067 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002068}
2069
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002070void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071{
Johan Hedberg30883512012-01-04 14:16:21 +02002072 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002073 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
Johan Hedberg561aafb2012-01-04 13:31:59 +02002075 list_for_each_entry_safe(p, n, &cache->all, all) {
2076 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002077 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002079
2080 INIT_LIST_HEAD(&cache->unknown);
2081 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082}
2083
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002084struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2085 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086{
Johan Hedberg30883512012-01-04 14:16:21 +02002087 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 struct inquiry_entry *e;
2089
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002090 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091
Johan Hedberg561aafb2012-01-04 13:31:59 +02002092 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002094 return e;
2095 }
2096
2097 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
2099
Johan Hedberg561aafb2012-01-04 13:31:59 +02002100struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002101 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002102{
Johan Hedberg30883512012-01-04 14:16:21 +02002103 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002104 struct inquiry_entry *e;
2105
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002106 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002107
2108 list_for_each_entry(e, &cache->unknown, list) {
2109 if (!bacmp(&e->data.bdaddr, bdaddr))
2110 return e;
2111 }
2112
2113 return NULL;
2114}
2115
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002116struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002117 bdaddr_t *bdaddr,
2118 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002119{
2120 struct discovery_state *cache = &hdev->discovery;
2121 struct inquiry_entry *e;
2122
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002123 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002124
2125 list_for_each_entry(e, &cache->resolve, list) {
2126 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2127 return e;
2128 if (!bacmp(&e->data.bdaddr, bdaddr))
2129 return e;
2130 }
2131
2132 return NULL;
2133}
2134
Johan Hedberga3d4e202012-01-09 00:53:02 +02002135void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002136 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002137{
2138 struct discovery_state *cache = &hdev->discovery;
2139 struct list_head *pos = &cache->resolve;
2140 struct inquiry_entry *p;
2141
2142 list_del(&ie->list);
2143
2144 list_for_each_entry(p, &cache->resolve, list) {
2145 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002146 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002147 break;
2148 pos = &p->list;
2149 }
2150
2151 list_add(&ie->list, pos);
2152}
2153
Marcel Holtmannaf589252014-07-01 14:11:20 +02002154u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2155 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156{
Johan Hedberg30883512012-01-04 14:16:21 +02002157 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002159 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002161 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162
Johan Hedberg6928a922014-10-26 20:46:09 +01002163 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01002164
Marcel Holtmannaf589252014-07-01 14:11:20 +02002165 if (!data->ssp_mode)
2166 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002167
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002168 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002169 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002170 if (!ie->data.ssp_mode)
2171 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002172
Johan Hedberga3d4e202012-01-09 00:53:02 +02002173 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002174 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002175 ie->data.rssi = data->rssi;
2176 hci_inquiry_cache_update_resolve(hdev, ie);
2177 }
2178
Johan Hedberg561aafb2012-01-04 13:31:59 +02002179 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002180 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002181
Johan Hedberg561aafb2012-01-04 13:31:59 +02002182 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002183 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002184 if (!ie) {
2185 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2186 goto done;
2187 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002188
2189 list_add(&ie->all, &cache->all);
2190
2191 if (name_known) {
2192 ie->name_state = NAME_KNOWN;
2193 } else {
2194 ie->name_state = NAME_NOT_KNOWN;
2195 list_add(&ie->list, &cache->unknown);
2196 }
2197
2198update:
2199 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002200 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002201 ie->name_state = NAME_KNOWN;
2202 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 }
2204
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002205 memcpy(&ie->data, data, sizeof(*data));
2206 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002208
2209 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002210 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002211
Marcel Holtmannaf589252014-07-01 14:11:20 +02002212done:
2213 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214}
2215
2216static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2217{
Johan Hedberg30883512012-01-04 14:16:21 +02002218 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 struct inquiry_info *info = (struct inquiry_info *) buf;
2220 struct inquiry_entry *e;
2221 int copied = 0;
2222
Johan Hedberg561aafb2012-01-04 13:31:59 +02002223 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002225
2226 if (copied >= num)
2227 break;
2228
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 bacpy(&info->bdaddr, &data->bdaddr);
2230 info->pscan_rep_mode = data->pscan_rep_mode;
2231 info->pscan_period_mode = data->pscan_period_mode;
2232 info->pscan_mode = data->pscan_mode;
2233 memcpy(info->dev_class, data->dev_class, 3);
2234 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002237 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 }
2239
2240 BT_DBG("cache %p, copied %d", cache, copied);
2241 return copied;
2242}
2243
Johan Hedberg42c6b122013-03-05 20:37:49 +02002244static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245{
2246 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002247 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 struct hci_cp_inquiry cp;
2249
2250 BT_DBG("%s", hdev->name);
2251
2252 if (test_bit(HCI_INQUIRY, &hdev->flags))
2253 return;
2254
2255 /* Start Inquiry */
2256 memcpy(&cp.lap, &ir->lap, 3);
2257 cp.length = ir->length;
2258 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002259 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260}
2261
2262int hci_inquiry(void __user *arg)
2263{
2264 __u8 __user *ptr = arg;
2265 struct hci_inquiry_req ir;
2266 struct hci_dev *hdev;
2267 int err = 0, do_inquiry = 0, max_rsp;
2268 long timeo;
2269 __u8 *buf;
2270
2271 if (copy_from_user(&ir, ptr, sizeof(ir)))
2272 return -EFAULT;
2273
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002274 hdev = hci_dev_get(ir.dev_id);
2275 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 return -ENODEV;
2277
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002278 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2279 err = -EBUSY;
2280 goto done;
2281 }
2282
Marcel Holtmann4a964402014-07-02 19:10:33 +02002283 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002284 err = -EOPNOTSUPP;
2285 goto done;
2286 }
2287
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002288 if (hdev->dev_type != HCI_BREDR) {
2289 err = -EOPNOTSUPP;
2290 goto done;
2291 }
2292
Johan Hedberg56f87902013-10-02 13:43:13 +03002293 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2294 err = -EOPNOTSUPP;
2295 goto done;
2296 }
2297
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002298 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002299 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002300 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002301 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 do_inquiry = 1;
2303 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002304 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305
Marcel Holtmann04837f62006-07-03 10:02:33 +02002306 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002307
2308 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002309 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2310 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002311 if (err < 0)
2312 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002313
2314 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2315 * cleared). If it is interrupted by a signal, return -EINTR.
2316 */
NeilBrown74316202014-07-07 15:16:04 +10002317 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002318 TASK_INTERRUPTIBLE))
2319 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002320 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002322 /* for unlimited number of responses we will use buffer with
2323 * 255 entries
2324 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2326
2327 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2328 * copy it to the user space.
2329 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002330 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002331 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 err = -ENOMEM;
2333 goto done;
2334 }
2335
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002336 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002338 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339
2340 BT_DBG("num_rsp %d", ir.num_rsp);
2341
2342 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2343 ptr += sizeof(ir);
2344 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002345 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002347 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 err = -EFAULT;
2349
2350 kfree(buf);
2351
2352done:
2353 hci_dev_put(hdev);
2354 return err;
2355}
2356
/* Power on an HCI controller and run its initialization sequence.
 *
 * Callers hold a reference on @hdev (obtained via hci_dev_get()).
 * Serialized against concurrent open/close via hci_req_lock().
 *
 * Return: 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EADDRNOTAVAIL, -EALREADY, -EIO, or an error from the driver's
 * setup/init callbacks).
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is going away; refuse to bring it up. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Ask the transport driver to open the device. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Run the full HCI init sequence, except when the kernel
		 * must not send commands on its own (unconfigured or
		 * user channel operation).
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify mgmt only for regular, fully configured BR/EDR
		 * controllers that are not in setup/config phase.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Preserve only the HCI_RAW bit across the failed open. */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2500
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002501/* ---- HCI ioctl helpers ---- */
2502
/* ioctl helper (HCIDEVUP): power on the controller with index @dev.
 *
 * Return: 0 on success or a negative errno (-ENODEV, -EOPNOTSUPP, or
 * an error from hci_dev_do_open()).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2557
Johan Hedbergd7347f32014-07-04 12:37:23 +03002558/* This function requires the caller holds hdev->lock */
2559static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2560{
2561 struct hci_conn_params *p;
2562
Johan Hedbergf161dd42014-08-15 21:06:54 +03002563 list_for_each_entry(p, &hdev->le_conn_params, list) {
2564 if (p->conn) {
2565 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002566 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002567 p->conn = NULL;
2568 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002569 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002570 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002571
2572 BT_DBG("All LE pending actions cleared");
2573}
2574
/* Power off an HCI controller: cancel/flush pending work, drop all
 * connections, optionally issue an HCI Reset, and call the driver's
 * close callback.
 *
 * Serialized against open requests via hci_req_lock(). Calling this
 * on a device that is already down is a no-op.
 *
 * Return: always 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just make sure the command timer is
		 * stopped before returning.
		 */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* rpa_expired is only armed when mgmt is in use. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drops the reference taken when the device was opened. */
	hci_dev_put(hdev);
	return 0;
}
2682
2683int hci_dev_close(__u16 dev)
2684{
2685 struct hci_dev *hdev;
2686 int err;
2687
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002688 hdev = hci_dev_get(dev);
2689 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002691
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002692 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2693 err = -EBUSY;
2694 goto done;
2695 }
2696
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002697 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2698 cancel_delayed_work(&hdev->power_off);
2699
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002701
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002702done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 hci_dev_put(hdev);
2704 return err;
2705}
2706
/* ioctl helper (HCIDEVRESET): soft-reset a running controller without
 * taking the interface down - purge queues, flush caches/connections
 * and issue an HCI Reset command.
 *
 * Return: 0 on success or a negative errno (-ENODEV, -ENETDOWN,
 * -EBUSY, -EOPNOTSUPP, or an error from the reset request).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* User channel devices are controlled by their owner only. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow control state before issuing the HCI Reset. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2760
2761int hci_dev_reset_stat(__u16 dev)
2762{
2763 struct hci_dev *hdev;
2764 int ret = 0;
2765
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002766 hdev = hci_dev_get(dev);
2767 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 return -ENODEV;
2769
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002770 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2771 ret = -EBUSY;
2772 goto done;
2773 }
2774
Marcel Holtmann4a964402014-07-02 19:10:33 +02002775 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002776 ret = -EOPNOTSUPP;
2777 goto done;
2778 }
2779
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2781
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002782done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 return ret;
2785}
2786
Johan Hedberg123abc02014-07-10 12:09:07 +03002787static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2788{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002789 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002790
2791 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2792
2793 if ((scan & SCAN_PAGE))
2794 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2795 &hdev->dev_flags);
2796 else
2797 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2798 &hdev->dev_flags);
2799
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002800 if ((scan & SCAN_INQUIRY)) {
2801 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2802 &hdev->dev_flags);
2803 } else {
2804 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2805 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2806 &hdev->dev_flags);
2807 }
2808
Johan Hedberg123abc02014-07-10 12:09:07 +03002809 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2810 return;
2811
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002812 if (conn_changed || discov_changed) {
2813 /* In case this was disabled through mgmt */
2814 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2815
2816 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2817 mgmt_update_adv_data(hdev);
2818
Johan Hedberg123abc02014-07-10 12:09:07 +03002819 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002820 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002821}
2822
/* ioctl helper: dispatch the HCISET* device configuration commands.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Only allowed for configured BR/EDR controllers that are not bound
 * to a user channel.
 *
 * Return: 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values (pkt count in the first
		 * halfword, MTU in the second, in host memory order).
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2924
/* ioctl helper (HCIGETDEVLIST): copy the ids and flags of registered
 * controllers to userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_list_req; dev_num is
 *       read first to size the reply buffer.
 *
 * Return: 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the request to two pages worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2974
/* ioctl helper (HCIGETDEVINFO): fill a struct hci_dev_info for the
 * controller requested in @arg and copy it back to userspace.
 *
 * Return: 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; next two bits: device type. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controllers report their LE buffer settings
		 * through the ACL fields.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
3027
3028/* ---- Interface to HCI drivers ---- */
3029
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003030static int hci_rfkill_set_block(void *data, bool blocked)
3031{
3032 struct hci_dev *hdev = data;
3033
3034 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3035
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003036 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3037 return -EBUSY;
3038
Johan Hedberg5e130362013-09-13 08:58:17 +03003039 if (blocked) {
3040 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003041 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3042 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003043 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003044 } else {
3045 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003046 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003047
3048 return 0;
3049}
3050
/* rfkill operations registered for every HCI controller. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3054
/* Deferred power on, scheduled on hdev->power_on (e.g. from mgmt or at
 * controller registration). Opens the device and then re-checks error
 * conditions that were deliberately ignored during the setup phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Schedule automatic power off if nothing claims the
		 * device within the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3113
3114static void hci_power_off(struct work_struct *work)
3115{
Johan Hedberg32435532011-11-07 22:16:04 +02003116 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003117 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003118
3119 BT_DBG("%s", hdev->name);
3120
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003121 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003122}
3123
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003124static void hci_discov_off(struct work_struct *work)
3125{
3126 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003127
3128 hdev = container_of(work, struct hci_dev, discov_off.work);
3129
3130 BT_DBG("%s", hdev->name);
3131
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003132 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003133}
3134
Johan Hedberg35f74982014-02-18 17:14:32 +02003135void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003136{
Johan Hedberg48210022013-01-27 00:31:28 +02003137 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003138
Johan Hedberg48210022013-01-27 00:31:28 +02003139 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3140 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003141 kfree(uuid);
3142 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003143}
3144
/* Remove and free all stored BR/EDR link keys.
 *
 * The list is RCU-managed: entries are unlinked with list_del_rcu()
 * and freed via kfree_rcu(), so concurrent RCU readers that already
 * hold a pointer stay safe until the grace period ends. Iteration
 * without the _safe variant works here because kfree_rcu() defers the
 * actual free, leaving the current entry's next pointer readable.
 *
 * NOTE(review): assumes the caller serializes writers (hdev->lock) —
 * confirm against call sites.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
3154
/* Remove and free all stored SMP Long Term Keys.
 *
 * Same RCU deletion pattern as hci_link_keys_clear(): list_del_rcu()
 * plus kfree_rcu() keeps concurrent RCU readers safe, and the deferred
 * free keeps the iterator's next pointer valid.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3164
/* Remove and free all stored SMP Identity Resolving Keys.
 *
 * Same RCU deletion pattern as hci_link_keys_clear(): unlink with
 * list_del_rcu(), free deferred via kfree_rcu().
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3174
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Walks the RCU-protected link key list under rcu_read_lock().
 * Returns the matching entry or NULL.
 *
 * NOTE(review): the pointer is returned after rcu_read_unlock(), so
 * the caller must guarantee the entry cannot be freed concurrently
 * (presumably via hdev->lock) — confirm against call sites.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3190
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303191static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003192 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003193{
3194 /* Legacy key */
3195 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303196 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003197
3198 /* Debug keys are insecure so don't store them persistently */
3199 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303200 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003201
3202 /* Changed combination key and there's no previous one */
3203 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303204 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003205
3206 /* Security mode 3 case */
3207 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303208 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003209
Johan Hedberge3befab2014-06-01 16:33:39 +03003210 /* BR/EDR key derived using SC from an LE link */
3211 if (conn->type == LE_LINK)
3212 return true;
3213
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003214 /* Neither local nor remote side had no-bonding as requirement */
3215 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303216 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003217
3218 /* Local side had dedicated bonding as requirement */
3219 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303220 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003221
3222 /* Remote side had dedicated bonding as requirement */
3223 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303224 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003225
3226 /* If none of the above criteria match, then don't store the key
3227 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303228 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003229}
3230
Johan Hedberge804d252014-07-16 11:42:28 +03003231static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003232{
Johan Hedberge804d252014-07-16 11:42:28 +03003233 if (type == SMP_LTK)
3234 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003235
Johan Hedberge804d252014-07-16 11:42:28 +03003236 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003237}
3238
/* Look up a Long Term Key by identity address, address type and role.
 *
 * A key matches when the address/type match and either the key is a
 * Secure Connections key (usable in both roles, per smp_ltk_is_sc())
 * or its stored role equals @role. Walks the RCU-protected LTK list.
 *
 * NOTE(review): pointer returned after rcu_read_unlock(); caller must
 * hold off concurrent key removal — confirm.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003258
/* Resolve a Resolvable Private Address to its Identity Resolving Key.
 *
 * Two passes over the RCU-protected IRK list:
 *  1. fast path — an IRK whose cached ->rpa already equals @rpa;
 *  2. slow path — cryptographically test each IRK with
 *     smp_irk_matches() and, on a hit, cache @rpa in the entry so the
 *     next lookup takes the fast path.
 *
 * NOTE(review): the bacpy() cache update happens under only
 * rcu_read_lock(); presumably writers are serialized elsewhere
 * (hdev->lock) — confirm.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3282
/* Look up an Identity Resolving Key by its identity address.
 *
 * Returns NULL immediately for addresses that cannot be identity
 * addresses: a random address must have its two top bits set
 * (0b11xxxxxx in b[5]) to be static random.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3304
/* Store (add or update) a BR/EDR link key for @bdaddr.
 *
 * If a key for the address already exists it is updated in place,
 * otherwise a new entry is allocated and RCU-added to the list.
 * When @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()).
 *
 * Returns the stored entry, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3351
/* Store (add or update) an SMP Long Term Key.
 *
 * An existing key for the same address/type/role is overwritten;
 * otherwise a new entry is allocated and RCU-added to the list.
 * @ediv and @rand stay in little-endian wire format.
 *
 * Returns the stored entry, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
3380
/* Store (add or update) an Identity Resolving Key for the identity
 * address @bdaddr/@addr_type.
 *
 * The key value and the currently associated RPA are always
 * refreshed, even for an existing entry.
 *
 * Returns the stored entry, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
3403
/* Delete the stored BR/EDR link key for @bdaddr.
 *
 * Uses RCU-safe removal (list_del_rcu + kfree_rcu) so concurrent
 * readers remain valid until the grace period.
 *
 * Returns 0 on success, -ENOENT when no key is stored.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
3419
/* Delete all Long Term Keys stored for @bdaddr/@bdaddr_type.
 *
 * Several LTKs may match one address (e.g. different roles), so the
 * whole list is scanned and every match removed with the RCU-safe
 * list_del_rcu + kfree_rcu pattern.
 *
 * Returns 0 when at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
3438
/* Delete all Identity Resolving Keys stored for @bdaddr/@addr_type.
 *
 * Silent if nothing matches; removal uses the RCU-safe
 * list_del_rcu + kfree_rcu pattern.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3453
Ville Tervo6bd32322011-02-16 16:32:41 +02003454/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003455static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003456{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003457 struct hci_dev *hdev = container_of(work, struct hci_dev,
3458 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003459
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003460 if (hdev->sent_cmd) {
3461 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3462 u16 opcode = __le16_to_cpu(sent->opcode);
3463
3464 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3465 } else {
3466 BT_ERR("%s command tx timeout", hdev->name);
3467 }
3468
Ville Tervo6bd32322011-02-16 16:32:41 +02003469 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003470 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003471}
3472
Szymon Janc2763eda2011-03-22 13:12:22 +01003473struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003474 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003475{
3476 struct oob_data *data;
3477
Johan Hedberg6928a922014-10-26 20:46:09 +01003478 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3479 if (bacmp(bdaddr, &data->bdaddr) != 0)
3480 continue;
3481 if (data->bdaddr_type != bdaddr_type)
3482 continue;
3483 return data;
3484 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003485
3486 return NULL;
3487}
3488
Johan Hedberg6928a922014-10-26 20:46:09 +01003489int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3490 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003491{
3492 struct oob_data *data;
3493
Johan Hedberg6928a922014-10-26 20:46:09 +01003494 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003495 if (!data)
3496 return -ENOENT;
3497
Johan Hedberg6928a922014-10-26 20:46:09 +01003498 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003499
3500 list_del(&data->list);
3501 kfree(data);
3502
3503 return 0;
3504}
3505
Johan Hedberg35f74982014-02-18 17:14:32 +02003506void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003507{
3508 struct oob_data *data, *n;
3509
3510 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3511 list_del(&data->list);
3512 kfree(data);
3513 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003514}
3515
/* Store (add or update) remote Out-of-Band pairing data.
 *
 * @hash192/@rand192 and @hash256/@rand256 are the P-192 and P-256
 * hash/randomizer pairs; a pair is only stored when both halves are
 * non-NULL, otherwise that pair is zeroed out (so stale values never
 * survive an update).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		/* kmalloc (not kzalloc) is fine: every field is
		 * explicitly written below.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3553
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003554struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003555 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003556{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003557 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003558
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003559 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003560 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003561 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003562 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003563
3564 return NULL;
3565}
3566
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003567void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003568{
3569 struct list_head *p, *n;
3570
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003571 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003572 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003573
3574 list_del(p);
3575 kfree(b);
3576 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003577}
3578
/* Add @bdaddr/@type to a generic bdaddr list.
 *
 * Returns 0 on success, -EBADF for the invalid BDADDR_ANY address,
 * -EEXIST when the entry is already present, -ENOMEM on allocation
 * failure.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
3600
/* Remove @bdaddr/@type from a generic bdaddr list.
 *
 * BDADDR_ANY acts as a wildcard: it clears the whole list and
 * succeeds. Otherwise returns 0 on success or -ENOENT when the
 * entry is not present.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
3619
Andre Guedes15819a72014-02-03 13:56:18 -03003620/* This function requires the caller holds hdev->lock */
3621struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3622 bdaddr_t *addr, u8 addr_type)
3623{
3624 struct hci_conn_params *params;
3625
Johan Hedberg738f6182014-07-03 19:33:51 +03003626 /* The conn params list only contains identity addresses */
3627 if (!hci_is_identity_address(addr, addr_type))
3628 return NULL;
3629
Andre Guedes15819a72014-02-03 13:56:18 -03003630 list_for_each_entry(params, &hdev->le_conn_params, list) {
3631 if (bacmp(&params->addr, addr) == 0 &&
3632 params->addr_type == addr_type) {
3633 return params;
3634 }
3635 }
3636
3637 return NULL;
3638}
3639
Andre Guedescef952c2014-02-26 20:21:49 -03003640static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3641{
3642 struct hci_conn *conn;
3643
3644 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3645 if (!conn)
3646 return false;
3647
3648 if (conn->dst_type != type)
3649 return false;
3650
3651 if (conn->state != BT_CONNECTED)
3652 return false;
3653
3654 return true;
3655}
3656
/* This function requires the caller holds hdev->lock */
/* Look up @addr/@addr_type in a pending-action list
 * (hdev->pend_le_conns or hdev->pend_le_reports).
 *
 * Note the iteration is over the params' ->action member, not ->list:
 * an entry is only found while it is linked into one of the action
 * lists. Returns the entry or NULL.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
3675
/* This function requires the caller holds hdev->lock */
/* Get or create the connection parameters entry for @addr/@addr_type.
 *
 * An existing entry is returned as-is. A new entry is initialized
 * from the controller's default LE connection parameters with
 * auto_connect disabled and an empty ->action link.
 *
 * Returns the entry, or NULL for a non-identity address or on
 * allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not yet on any pend_le_* action list */
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3711
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for @addr/@addr_type, creating the
 * connection parameters entry if needed.
 *
 * The entry is first unhooked from any pending-action list, then
 * re-queued according to the new policy: REPORT entries go on
 * pend_le_reports, DIRECT/ALWAYS entries on pend_le_conns (unless a
 * connection already exists). The background scan is refreshed for
 * every policy change.
 *
 * Returns 0 on success, -EIO when the entry could not be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* del_init so the entry can be re-added to a new action list */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3752
/* Release one connection parameters entry: drop the reference (and
 * the hold) on any connection it pins, unlink it from both the params
 * list and any pending-action list, and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
3764
/* This function requires the caller holds hdev->lock */
/* Delete the connection parameters entry for @addr/@addr_type (if
 * any) and refresh the background scan to account for its removal
 * from the pending-action lists. Silent when no entry exists.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3780
/* This function requires the caller holds hdev->lock */
/* Drop all connection parameter entries whose auto-connect policy is
 * DISABLED. Plain list_del/kfree suffices here: a DISABLED entry is
 * never linked on a pending-action list (see hci_conn_params_set).
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3795
/* This function requires the caller holds hdev->lock */
/* Drop every connection parameters entry (releasing pinned
 * connections and action-list links via hci_conn_params_free) and
 * refresh the background scan afterwards.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
3808
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003809static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003810{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003811 if (status) {
3812 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003813
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003814 hci_dev_lock(hdev);
3815 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3816 hci_dev_unlock(hdev);
3817 return;
3818 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003819}
3820
/* Completion handler for the LE scan disable request queued by
 * le_scan_disable_work(). Depending on the discovery type, either mark
 * discovery as stopped (LE-only) or move on to the BR/EDR inquiry phase
 * of interleaved discovery.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning is off, so discovery is done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase finished; start the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale cache entries before the new inquiry */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3863
/* Delayed work that turns off LE scanning (e.g. when the scan timeout
 * fires). Builds a one-command request and lets
 * le_scan_disable_work_complete() decide what happens next based on the
 * current discovery type.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3881
/* Queue an HCI_OP_LE_SET_RANDOM_ADDR command for rpa onto req, unless
 * the controller is currently advertising or initiating an LE
 * connection — in that case the update is deferred by marking the RPA
 * expired so a later cycle retries it.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3905
/* Choose the own address type for LE operations queued on req and, when
 * a random address is needed, queue the command that programs it.
 *
 * Priority order: resolvable private address (privacy enabled) ->
 * unresolvable private address (require_privacy) -> static random
 * address (forced, or no public address) -> public address.
 *
 * On success *own_addr_type is set to ADDR_LE_DEV_RANDOM or
 * ADDR_LE_DEV_PUBLIC and 0 is returned; a negative error is returned
 * only when RPA generation fails.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* RPA still valid and already programmed: nothing to do */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule the next RPA rotation */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3975
Johan Hedberga1f4c312014-02-27 14:05:41 +02003976/* Copy the Identity Address of the controller.
3977 *
3978 * If the controller has a public BD_ADDR, then by default use that one.
3979 * If this is a LE only controller without a public address, default to
3980 * the static random address.
3981 *
3982 * For debugging purposes it is possible to force controllers with a
3983 * public address to use the static random address instead.
3984 */
3985void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3986 u8 *bdaddr_type)
3987{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003988 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003989 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3990 bacpy(bdaddr, &hdev->static_addr);
3991 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3992 } else {
3993 bacpy(bdaddr, &hdev->bdaddr);
3994 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3995 }
3996}
3997
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and fills in protocol defaults, list
 * heads, work items, queues and locks. The device is not yet visible
 * to the rest of the system; hci_register_dev() does that. Returns
 * NULL on allocation failure. Pair with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* BR/EDR defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (values in controller units, e.g. 0.625ms slots) */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, pending LE actions, ...) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving RX/TX/command processing and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4074
/* Free HCI device allocated with hci_alloc_dev(). The memory itself is
 * released by the driver-model release callback once the last reference
 * to the embedded device is dropped.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4082
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083/* Register HCI device */
4084int hci_register_dev(struct hci_dev *hdev)
4085{
David Herrmannb1b813d2012-04-22 14:39:58 +02004086 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087
Marcel Holtmann74292d52014-07-06 15:50:27 +02004088 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 return -EINVAL;
4090
Mat Martineau08add512011-11-02 16:18:36 -07004091 /* Do not allow HCI_AMP devices to register at index 0,
4092 * so the index can be used as the AMP controller ID.
4093 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004094 switch (hdev->dev_type) {
4095 case HCI_BREDR:
4096 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4097 break;
4098 case HCI_AMP:
4099 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4100 break;
4101 default:
4102 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004104
Sasha Levin3df92b32012-05-27 22:36:56 +02004105 if (id < 0)
4106 return id;
4107
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108 sprintf(hdev->name, "hci%d", id);
4109 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004110
4111 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4112
Kees Cookd8537542013-07-03 15:04:57 -07004113 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4114 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004115 if (!hdev->workqueue) {
4116 error = -ENOMEM;
4117 goto err;
4118 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004119
Kees Cookd8537542013-07-03 15:04:57 -07004120 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4121 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004122 if (!hdev->req_workqueue) {
4123 destroy_workqueue(hdev->workqueue);
4124 error = -ENOMEM;
4125 goto err;
4126 }
4127
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004128 if (!IS_ERR_OR_NULL(bt_debugfs))
4129 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4130
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004131 dev_set_name(&hdev->dev, "%s", hdev->name);
4132
4133 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004134 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004135 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004137 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004138 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4139 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004140 if (hdev->rfkill) {
4141 if (rfkill_register(hdev->rfkill) < 0) {
4142 rfkill_destroy(hdev->rfkill);
4143 hdev->rfkill = NULL;
4144 }
4145 }
4146
Johan Hedberg5e130362013-09-13 08:58:17 +03004147 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4148 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4149
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004150 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004151 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004152
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004153 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004154 /* Assume BR/EDR support until proven otherwise (such as
4155 * through reading supported features during init.
4156 */
4157 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4158 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004159
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004160 write_lock(&hci_dev_list_lock);
4161 list_add(&hdev->list, &hci_dev_list);
4162 write_unlock(&hci_dev_list_lock);
4163
Marcel Holtmann4a964402014-07-02 19:10:33 +02004164 /* Devices that are marked for raw-only usage are unconfigured
4165 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004166 */
4167 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004168 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004169
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004171 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
Johan Hedberg19202572013-01-14 22:33:51 +02004173 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004174
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004176
David Herrmann33ca9542011-10-08 14:58:49 +02004177err_wqueue:
4178 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004179 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004180err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004181 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004182
David Herrmann33ca9542011-10-08 14:58:49 +02004183 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184}
4185EXPORT_SYMBOL(hci_register_dev);
4186
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, cancels pending work, tears down mgmt/rfkill/SMP/sysfs/
 * debugfs state, destroys the work queues, clears all per-device lists
 * and finally drops the reference taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt when the device finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4254
/* Suspend HCI device: broadcast HCI_DEV_SUSPEND via hci_notify().
 * Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4262
/* Resume HCI device: broadcast HCI_DEV_RESUME via hci_notify().
 * Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4270
Marcel Holtmann75e05692014-11-02 08:15:38 +01004271/* Reset HCI device */
4272int hci_reset_dev(struct hci_dev *hdev)
4273{
4274 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4275 struct sk_buff *skb;
4276
4277 skb = bt_skb_alloc(3, GFP_ATOMIC);
4278 if (!skb)
4279 return -ENOMEM;
4280
4281 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4282 memcpy(skb_put(skb, 3), hw_err, 3);
4283
4284 /* Send Hardware Error to upper stack */
4285 return hci_recv_frame(hdev, skb);
4286}
4287EXPORT_SYMBOL(hci_reset_dev);
4288
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Only accept frames while the device is initializing or up;
	 * otherwise the skb is consumed and -ENXIO returned.
	 */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue the frame for the RX work item to process */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
4310
/* Incrementally reassemble one HCI packet of the given type from a raw
 * byte stream.
 *
 * Partial state is kept in hdev->reassembly[index]. Once the packet
 * header has arrived, the expected payload length is read from it; when
 * the packet is complete it is handed to hci_recv_frame() and the slot
 * is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * advertised payload would not fit the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First fragment: allocate a buffer sized for the largest
		 * possible packet of this type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has arrived, read the payload
		 * length from it and verify it fits the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4418
/* Reassembly slot used for type-prefixed byte streams (e.g. UART
 * transports where each packet starts with a one-byte packet type).
 */
#define STREAM_REASSEMBLY 0

/* Feed count bytes of a type-prefixed HCI byte stream into the
 * reassembler. When no packet is in progress the first byte is taken
 * as the packet type; otherwise the stored type is reused. Loops until
 * all input is consumed or an error occurs.
 *
 * Returns the number of unconsumed bytes (0 when everything was used)
 * or a negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4453
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454/* ---- Interface to upper protocols ---- */
4455
/* Register an upper-protocol callback structure on the global HCI
 * callback list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4467
/* Remove a previously registered upper-protocol callback structure from
 * the global HCI callback list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4479
/* Hand one outgoing frame to the driver. A copy goes to the monitor
 * interface and, in promiscuous mode, to the HCI sockets. The skb is
 * consumed: either by the driver or freed here on send failure.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
4506
Johan Hedberg3119ae92013-03-05 20:37:44 +02004507void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4508{
4509 skb_queue_head_init(&req->cmd_q);
4510 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004511 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004512}
4513
/* Submit a built request: attach the completion callback to the last
 * queued command, splice all commands onto the device command queue and
 * kick the command work item.
 *
 * Returns 0 on success, the builder error if one occurred while adding
 * commands, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback fires after the last command completes */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4545
Marcel Holtmann899de762014-07-11 05:51:58 +02004546bool hci_req_pending(struct hci_dev *hdev)
4547{
4548 return (hdev->req_status == HCI_REQ_PEND);
4549}
4550
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004551static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004552 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553{
4554 int len = HCI_COMMAND_HDR_SIZE + plen;
4555 struct hci_command_hdr *hdr;
4556 struct sk_buff *skb;
4557
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004559 if (!skb)
4560 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561
4562 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004563 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 hdr->plen = plen;
4565
4566 if (plen)
4567 memcpy(skb_put(skb, plen), param, plen);
4568
4569 BT_DBG("skb len %d", skb->len);
4570
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004571 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004572 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004573
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004574 return skb;
4575}
4576
4577/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004578int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4579 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004580{
4581 struct sk_buff *skb;
4582
4583 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4584
4585 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4586 if (!skb) {
4587 BT_ERR("%s no memory for command", hdev->name);
4588 return -ENOMEM;
4589 }
4590
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004591 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004592 * single-command requests.
4593 */
4594 bt_cb(skb)->req.start = true;
4595
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004597 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598
4599 return 0;
4600}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601
Johan Hedberg71c76a12013-03-05 20:37:46 +02004602/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004603void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4604 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004605{
4606 struct hci_dev *hdev = req->hdev;
4607 struct sk_buff *skb;
4608
4609 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4610
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004611 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004612 * queueing the HCI command. We can simply return.
4613 */
4614 if (req->err)
4615 return;
4616
Johan Hedberg71c76a12013-03-05 20:37:46 +02004617 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4618 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004619 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4620 hdev->name, opcode);
4621 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004622 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004623 }
4624
4625 if (skb_queue_empty(&req->cmd_q))
4626 bt_cb(skb)->req.start = true;
4627
Johan Hedberg02350a72013-04-03 21:50:29 +03004628 bt_cb(skb)->req.event = event;
4629
Johan Hedberg71c76a12013-03-05 20:37:46 +02004630 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004631}
4632
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004633void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4634 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004635{
4636 hci_req_add_ev(req, opcode, plen, param, 0);
4637}
4638
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004640void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641{
4642 struct hci_command_hdr *hdr;
4643
4644 if (!hdev->sent_cmd)
4645 return NULL;
4646
4647 hdr = (void *) hdev->sent_cmd->data;
4648
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004649 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650 return NULL;
4651
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004652 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653
4654 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4655}
4656
4657/* Send ACL data */
4658static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4659{
4660 struct hci_acl_hdr *hdr;
4661 int len = skb->len;
4662
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004663 skb_push(skb, HCI_ACL_HDR_SIZE);
4664 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004665 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004666 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4667 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668}
4669
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004670static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004671 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004673 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674 struct hci_dev *hdev = conn->hdev;
4675 struct sk_buff *list;
4676
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004677 skb->len = skb_headlen(skb);
4678 skb->data_len = 0;
4679
4680 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004681
4682 switch (hdev->dev_type) {
4683 case HCI_BREDR:
4684 hci_add_acl_hdr(skb, conn->handle, flags);
4685 break;
4686 case HCI_AMP:
4687 hci_add_acl_hdr(skb, chan->handle, flags);
4688 break;
4689 default:
4690 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4691 return;
4692 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004693
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004694 list = skb_shinfo(skb)->frag_list;
4695 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 /* Non fragmented */
4697 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4698
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004699 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700 } else {
4701 /* Fragmented */
4702 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4703
4704 skb_shinfo(skb)->frag_list = NULL;
4705
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004706 /* Queue all fragments atomically. We need to use spin_lock_bh
4707 * here because of 6LoWPAN links, as there this function is
4708 * called from softirq and using normal spin lock could cause
4709 * deadlocks.
4710 */
4711 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004713 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004714
4715 flags &= ~ACL_START;
4716 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 do {
4718 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004719
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004720 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004721 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722
4723 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4724
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004725 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726 } while (list);
4727
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004728 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004730}
4731
4732void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4733{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004734 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004735
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004736 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004737
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004738 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004740 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742
4743/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004744void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745{
4746 struct hci_dev *hdev = conn->hdev;
4747 struct hci_sco_hdr hdr;
4748
4749 BT_DBG("%s len %d", hdev->name, skb->len);
4750
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004751 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 hdr.dlen = skb->len;
4753
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004754 skb_push(skb, HCI_SCO_HDR_SIZE);
4755 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004756 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004758 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004759
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004761 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763
4764/* ---- HCI TX task (outgoing data) ---- */
4765
4766/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004767static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4768 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769{
4770 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004771 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004772 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004774 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004776
4777 rcu_read_lock();
4778
4779 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004780 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004782
4783 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4784 continue;
4785
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786 num++;
4787
4788 if (c->sent < min) {
4789 min = c->sent;
4790 conn = c;
4791 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004792
4793 if (hci_conn_num(hdev, type) == num)
4794 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 }
4796
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004797 rcu_read_unlock();
4798
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004800 int cnt, q;
4801
4802 switch (conn->type) {
4803 case ACL_LINK:
4804 cnt = hdev->acl_cnt;
4805 break;
4806 case SCO_LINK:
4807 case ESCO_LINK:
4808 cnt = hdev->sco_cnt;
4809 break;
4810 case LE_LINK:
4811 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4812 break;
4813 default:
4814 cnt = 0;
4815 BT_ERR("Unknown link type");
4816 }
4817
4818 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004819 *quote = q ? q : 1;
4820 } else
4821 *quote = 0;
4822
4823 BT_DBG("conn %p quote %d", conn, *quote);
4824 return conn;
4825}
4826
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004827static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828{
4829 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004830 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831
Ville Tervobae1f5d92011-02-10 22:38:53 -03004832 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004834 rcu_read_lock();
4835
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004837 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004838 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004839 BT_ERR("%s killing stalled connection %pMR",
4840 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004841 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842 }
4843 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004844
4845 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846}
4847
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004848static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4849 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004850{
4851 struct hci_conn_hash *h = &hdev->conn_hash;
4852 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004853 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004854 struct hci_conn *conn;
4855 int cnt, q, conn_num = 0;
4856
4857 BT_DBG("%s", hdev->name);
4858
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004859 rcu_read_lock();
4860
4861 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004862 struct hci_chan *tmp;
4863
4864 if (conn->type != type)
4865 continue;
4866
4867 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4868 continue;
4869
4870 conn_num++;
4871
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004872 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004873 struct sk_buff *skb;
4874
4875 if (skb_queue_empty(&tmp->data_q))
4876 continue;
4877
4878 skb = skb_peek(&tmp->data_q);
4879 if (skb->priority < cur_prio)
4880 continue;
4881
4882 if (skb->priority > cur_prio) {
4883 num = 0;
4884 min = ~0;
4885 cur_prio = skb->priority;
4886 }
4887
4888 num++;
4889
4890 if (conn->sent < min) {
4891 min = conn->sent;
4892 chan = tmp;
4893 }
4894 }
4895
4896 if (hci_conn_num(hdev, type) == conn_num)
4897 break;
4898 }
4899
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004900 rcu_read_unlock();
4901
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004902 if (!chan)
4903 return NULL;
4904
4905 switch (chan->conn->type) {
4906 case ACL_LINK:
4907 cnt = hdev->acl_cnt;
4908 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004909 case AMP_LINK:
4910 cnt = hdev->block_cnt;
4911 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004912 case SCO_LINK:
4913 case ESCO_LINK:
4914 cnt = hdev->sco_cnt;
4915 break;
4916 case LE_LINK:
4917 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4918 break;
4919 default:
4920 cnt = 0;
4921 BT_ERR("Unknown link type");
4922 }
4923
4924 q = cnt / num;
4925 *quote = q ? q : 1;
4926 BT_DBG("chan %p quote %d", chan, *quote);
4927 return chan;
4928}
4929
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004930static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4931{
4932 struct hci_conn_hash *h = &hdev->conn_hash;
4933 struct hci_conn *conn;
4934 int num = 0;
4935
4936 BT_DBG("%s", hdev->name);
4937
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004938 rcu_read_lock();
4939
4940 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004941 struct hci_chan *chan;
4942
4943 if (conn->type != type)
4944 continue;
4945
4946 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4947 continue;
4948
4949 num++;
4950
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004951 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004952 struct sk_buff *skb;
4953
4954 if (chan->sent) {
4955 chan->sent = 0;
4956 continue;
4957 }
4958
4959 if (skb_queue_empty(&chan->data_q))
4960 continue;
4961
4962 skb = skb_peek(&chan->data_q);
4963 if (skb->priority >= HCI_PRIO_MAX - 1)
4964 continue;
4965
4966 skb->priority = HCI_PRIO_MAX - 1;
4967
4968 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004969 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004970 }
4971
4972 if (hci_conn_num(hdev, type) == num)
4973 break;
4974 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004975
4976 rcu_read_unlock();
4977
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004978}
4979
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004980static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4981{
4982 /* Calculate count of blocks used by this packet */
4983 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4984}
4985
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004986static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004988 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989 /* ACL tx timeout must be longer than maximum
4990 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004991 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004992 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004993 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004995}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004997static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004998{
4999 unsigned int cnt = hdev->acl_cnt;
5000 struct hci_chan *chan;
5001 struct sk_buff *skb;
5002 int quote;
5003
5004 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005005
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005006 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005007 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005008 u32 priority = (skb_peek(&chan->data_q))->priority;
5009 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005010 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005011 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005012
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005013 /* Stop if priority has changed */
5014 if (skb->priority < priority)
5015 break;
5016
5017 skb = skb_dequeue(&chan->data_q);
5018
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005019 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03005020 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005021
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005022 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023 hdev->acl_last_tx = jiffies;
5024
5025 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005026 chan->sent++;
5027 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 }
5029 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005030
5031 if (cnt != hdev->acl_cnt)
5032 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033}
5034
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005035static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005036{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005037 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005038 struct hci_chan *chan;
5039 struct sk_buff *skb;
5040 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005041 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005042
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005043 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005044
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005045 BT_DBG("%s", hdev->name);
5046
5047 if (hdev->dev_type == HCI_AMP)
5048 type = AMP_LINK;
5049 else
5050 type = ACL_LINK;
5051
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005052 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005053 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005054 u32 priority = (skb_peek(&chan->data_q))->priority;
5055 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5056 int blocks;
5057
5058 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005059 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005060
5061 /* Stop if priority has changed */
5062 if (skb->priority < priority)
5063 break;
5064
5065 skb = skb_dequeue(&chan->data_q);
5066
5067 blocks = __get_blocks(hdev, skb);
5068 if (blocks > hdev->block_cnt)
5069 return;
5070
5071 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005072 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005073
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005074 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005075 hdev->acl_last_tx = jiffies;
5076
5077 hdev->block_cnt -= blocks;
5078 quote -= blocks;
5079
5080 chan->sent += blocks;
5081 chan->conn->sent += blocks;
5082 }
5083 }
5084
5085 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005086 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005087}
5088
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005089static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005090{
5091 BT_DBG("%s", hdev->name);
5092
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005093 /* No ACL link over BR/EDR controller */
5094 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5095 return;
5096
5097 /* No AMP link over AMP controller */
5098 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005099 return;
5100
5101 switch (hdev->flow_ctl_mode) {
5102 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5103 hci_sched_acl_pkt(hdev);
5104 break;
5105
5106 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5107 hci_sched_acl_blk(hdev);
5108 break;
5109 }
5110}
5111
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005113static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114{
5115 struct hci_conn *conn;
5116 struct sk_buff *skb;
5117 int quote;
5118
5119 BT_DBG("%s", hdev->name);
5120
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005121 if (!hci_conn_num(hdev, SCO_LINK))
5122 return;
5123
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5125 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5126 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005127 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128
5129 conn->sent++;
5130 if (conn->sent == ~0)
5131 conn->sent = 0;
5132 }
5133 }
5134}
5135
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005136static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005137{
5138 struct hci_conn *conn;
5139 struct sk_buff *skb;
5140 int quote;
5141
5142 BT_DBG("%s", hdev->name);
5143
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005144 if (!hci_conn_num(hdev, ESCO_LINK))
5145 return;
5146
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005147 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5148 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005149 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5150 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005151 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005152
5153 conn->sent++;
5154 if (conn->sent == ~0)
5155 conn->sent = 0;
5156 }
5157 }
5158}
5159
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005160static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005161{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005162 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005163 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005164 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005165
5166 BT_DBG("%s", hdev->name);
5167
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005168 if (!hci_conn_num(hdev, LE_LINK))
5169 return;
5170
Marcel Holtmann4a964402014-07-02 19:10:33 +02005171 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005172 /* LE tx timeout must be longer than maximum
5173 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005174 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005175 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005176 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005177 }
5178
5179 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005180 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005181 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005182 u32 priority = (skb_peek(&chan->data_q))->priority;
5183 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005184 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005185 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005186
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005187 /* Stop if priority has changed */
5188 if (skb->priority < priority)
5189 break;
5190
5191 skb = skb_dequeue(&chan->data_q);
5192
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005193 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005194 hdev->le_last_tx = jiffies;
5195
5196 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005197 chan->sent++;
5198 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005199 }
5200 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005201
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005202 if (hdev->le_pkts)
5203 hdev->le_cnt = cnt;
5204 else
5205 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005206
5207 if (cnt != tmp)
5208 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005209}
5210
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005211static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005213 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 struct sk_buff *skb;
5215
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005216 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005217 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218
Marcel Holtmann52de5992013-09-03 18:08:38 -07005219 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5220 /* Schedule queues and send stuff to HCI driver */
5221 hci_sched_acl(hdev);
5222 hci_sched_sco(hdev);
5223 hci_sched_esco(hdev);
5224 hci_sched_le(hdev);
5225 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005226
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 /* Send next queued raw (unknown type) packet */
5228 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005229 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230}
5231
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005232/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233
5234/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005235static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236{
5237 struct hci_acl_hdr *hdr = (void *) skb->data;
5238 struct hci_conn *conn;
5239 __u16 handle, flags;
5240
5241 skb_pull(skb, HCI_ACL_HDR_SIZE);
5242
5243 handle = __le16_to_cpu(hdr->handle);
5244 flags = hci_flags(handle);
5245 handle = hci_handle(handle);
5246
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005247 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005248 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249
5250 hdev->stat.acl_rx++;
5251
5252 hci_dev_lock(hdev);
5253 conn = hci_conn_hash_lookup_handle(hdev, handle);
5254 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005255
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005257 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005258
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005260 l2cap_recv_acldata(conn, skb, flags);
5261 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005263 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005264 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005265 }
5266
5267 kfree_skb(skb);
5268}
5269
5270/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005271static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272{
5273 struct hci_sco_hdr *hdr = (void *) skb->data;
5274 struct hci_conn *conn;
5275 __u16 handle;
5276
5277 skb_pull(skb, HCI_SCO_HDR_SIZE);
5278
5279 handle = __le16_to_cpu(hdr->handle);
5280
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005281 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282
5283 hdev->stat.sco_rx++;
5284
5285 hci_dev_lock(hdev);
5286 conn = hci_conn_hash_lookup_handle(hdev, handle);
5287 hci_dev_unlock(hdev);
5288
5289 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005290 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005291 sco_recv_scodata(conn, skb);
5292 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005294 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005295 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296 }
5297
5298 kfree_skb(skb);
5299}
5300
Johan Hedberg9238f362013-03-05 20:37:48 +02005301static bool hci_req_is_complete(struct hci_dev *hdev)
5302{
5303 struct sk_buff *skb;
5304
5305 skb = skb_peek(&hdev->cmd_q);
5306 if (!skb)
5307 return true;
5308
5309 return bt_cb(skb)->req.start;
5310}
5311
Johan Hedberg42c6b122013-03-05 20:37:49 +02005312static void hci_resend_last(struct hci_dev *hdev)
5313{
5314 struct hci_command_hdr *sent;
5315 struct sk_buff *skb;
5316 u16 opcode;
5317
5318 if (!hdev->sent_cmd)
5319 return;
5320
5321 sent = (void *) hdev->sent_cmd->data;
5322 opcode = __le16_to_cpu(sent->opcode);
5323 if (opcode == HCI_OP_RESET)
5324 return;
5325
5326 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5327 if (!skb)
5328 return;
5329
5330 skb_queue_head(&hdev->cmd_q, skb);
5331 queue_work(hdev->workqueue, &hdev->cmd_work);
5332}
5333
/* Handle completion of the HCI command identified by @opcode with the
 * given @status, and run the request-complete callback once the whole
 * request (a chain of queued commands) has finished.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * Dequeue until the start of the next request (or the queue is
	 * empty), picking up the complete callback from the last
	 * discarded command; the cmd_q lock guards against concurrent
	 * queueing from other contexts.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* First command of the next request - put it back */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5399
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005400static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005402 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 struct sk_buff *skb;
5404
5405 BT_DBG("%s", hdev->name);
5406
Linus Torvalds1da177e2005-04-16 15:20:36 -07005407 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005408 /* Send copy to monitor */
5409 hci_send_to_monitor(hdev, skb);
5410
Linus Torvalds1da177e2005-04-16 15:20:36 -07005411 if (atomic_read(&hdev->promisc)) {
5412 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005413 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 }
5415
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005416 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005417 kfree_skb(skb);
5418 continue;
5419 }
5420
5421 if (test_bit(HCI_INIT, &hdev->flags)) {
5422 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005423 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 case HCI_ACLDATA_PKT:
5425 case HCI_SCODATA_PKT:
5426 kfree_skb(skb);
5427 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429 }
5430
5431 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005432 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005434 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 hci_event_packet(hdev, skb);
5436 break;
5437
5438 case HCI_ACLDATA_PKT:
5439 BT_DBG("%s ACL data packet", hdev->name);
5440 hci_acldata_packet(hdev, skb);
5441 break;
5442
5443 case HCI_SCODATA_PKT:
5444 BT_DBG("%s SCO data packet", hdev->name);
5445 hci_scodata_packet(hdev, skb);
5446 break;
5447
5448 default:
5449 kfree_skb(skb);
5450 break;
5451 }
5452 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453}
5454
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005455static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005457 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458 struct sk_buff *skb;
5459
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005460 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5461 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005464 if (atomic_read(&hdev->cmd_cnt)) {
5465 skb = skb_dequeue(&hdev->cmd_q);
5466 if (!skb)
5467 return;
5468
Wei Yongjun7585b972009-02-25 18:29:52 +08005469 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005471 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005472 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005474 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005475 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005476 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005477 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005478 schedule_delayed_work(&hdev->cmd_timer,
5479 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480 } else {
5481 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005482 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005483 }
5484 }
5485}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005486
5487void hci_req_add_le_scan_disable(struct hci_request *req)
5488{
5489 struct hci_cp_le_set_scan_enable cp;
5490
5491 memset(&cp, 0, sizeof(cp));
5492 cp.enable = LE_SCAN_DISABLE;
5493 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5494}
Andre Guedesa4790db2014-02-26 20:21:47 -03005495
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005496static void add_to_white_list(struct hci_request *req,
5497 struct hci_conn_params *params)
5498{
5499 struct hci_cp_le_add_to_white_list cp;
5500
5501 cp.bdaddr_type = params->addr_type;
5502 bacpy(&cp.bdaddr, &params->addr);
5503
5504 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5505}
5506
5507static u8 update_white_list(struct hci_request *req)
5508{
5509 struct hci_dev *hdev = req->hdev;
5510 struct hci_conn_params *params;
5511 struct bdaddr_list *b;
5512 uint8_t white_list_entries = 0;
5513
5514 /* Go through the current white list programmed into the
5515 * controller one by one and check if that address is still
5516 * in the list of pending connections or list of devices to
5517 * report. If not present in either list, then queue the
5518 * command to remove it from the controller.
5519 */
5520 list_for_each_entry(b, &hdev->le_white_list, list) {
5521 struct hci_cp_le_del_from_white_list cp;
5522
5523 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5524 &b->bdaddr, b->bdaddr_type) ||
5525 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5526 &b->bdaddr, b->bdaddr_type)) {
5527 white_list_entries++;
5528 continue;
5529 }
5530
5531 cp.bdaddr_type = b->bdaddr_type;
5532 bacpy(&cp.bdaddr, &b->bdaddr);
5533
5534 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5535 sizeof(cp), &cp);
5536 }
5537
5538 /* Since all no longer valid white list entries have been
5539 * removed, walk through the list of pending connections
5540 * and ensure that any new device gets programmed into
5541 * the controller.
5542 *
5543 * If the list of the devices is larger than the list of
5544 * available white list entries in the controller, then
5545 * just abort and return filer policy value to not use the
5546 * white list.
5547 */
5548 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5549 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5550 &params->addr, params->addr_type))
5551 continue;
5552
5553 if (white_list_entries >= hdev->le_white_list_size) {
5554 /* Select filter policy to accept all advertising */
5555 return 0x00;
5556 }
5557
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005558 if (hci_find_irk_by_addr(hdev, &params->addr,
5559 params->addr_type)) {
5560 /* White list can not be used with RPAs */
5561 return 0x00;
5562 }
5563
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005564 white_list_entries++;
5565 add_to_white_list(req, params);
5566 }
5567
5568 /* After adding all new pending connections, walk through
5569 * the list of pending reports and also add these to the
5570 * white list if there is still space.
5571 */
5572 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5573 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5574 &params->addr, params->addr_type))
5575 continue;
5576
5577 if (white_list_entries >= hdev->le_white_list_size) {
5578 /* Select filter policy to accept all advertising */
5579 return 0x00;
5580 }
5581
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005582 if (hci_find_irk_by_addr(hdev, &params->addr,
5583 params->addr_type)) {
5584 /* White list can not be used with RPAs */
5585 return 0x00;
5586 }
5587
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005588 white_list_entries++;
5589 add_to_white_list(req, params);
5590 }
5591
5592 /* Select filter policy to use white list */
5593 return 0x01;
5594}
5595
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005596void hci_req_add_le_passive_scan(struct hci_request *req)
5597{
5598 struct hci_cp_le_set_scan_param param_cp;
5599 struct hci_cp_le_set_scan_enable enable_cp;
5600 struct hci_dev *hdev = req->hdev;
5601 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005602 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005603
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005604 /* Set require_privacy to false since no SCAN_REQ are send
5605 * during passive scanning. Not using an unresolvable address
5606 * here is important so that peer devices using direct
5607 * advertising with our address will be correctly reported
5608 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005609 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005610 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005611 return;
5612
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005613 /* Adding or removing entries from the white list must
5614 * happen before enabling scanning. The controller does
5615 * not allow white list modification while scanning.
5616 */
5617 filter_policy = update_white_list(req);
5618
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005619 memset(&param_cp, 0, sizeof(param_cp));
5620 param_cp.type = LE_SCAN_PASSIVE;
5621 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5622 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5623 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005624 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005625 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5626 &param_cp);
5627
5628 memset(&enable_cp, 0, sizeof(enable_cp));
5629 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005630 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005631 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5632 &enable_cp);
5633}
5634
Andre Guedesa4790db2014-02-26 20:21:47 -03005635static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5636{
5637 if (status)
5638 BT_DBG("HCI request failed to update background scanning: "
5639 "status 0x%2.2x", status);
5640}
5641
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Only meddle with scanning when the controller is up and in
	 * normal operation - not initializing, not in setup/config,
	 * not auto-powering off and not being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
Johan Hedberg432df052014-08-01 11:13:31 +03005714
Johan Hedberg22f433d2014-08-01 11:13:32 +03005715static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5716{
5717 struct bdaddr_list *b;
5718
5719 list_for_each_entry(b, &hdev->whitelist, list) {
5720 struct hci_conn *conn;
5721
5722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5723 if (!conn)
5724 return true;
5725
5726 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5727 return true;
5728 }
5729
5730 return false;
5731}
5732
Johan Hedberg432df052014-08-01 11:13:31 +03005733void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5734{
5735 u8 scan;
5736
5737 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5738 return;
5739
5740 if (!hdev_is_powered(hdev))
5741 return;
5742
5743 if (mgmt_powering_down(hdev))
5744 return;
5745
5746 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005747 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005748 scan = SCAN_PAGE;
5749 else
5750 scan = SCAN_DISABLED;
5751
5752 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5753 return;
5754
5755 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5756 scan |= SCAN_INQUIRY;
5757
5758 if (req)
5759 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5760 else
5761 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5762}