blob: da8969e08b0cf44865f5ee26b152e208f54ac9c5 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

/* Work handlers for the RX, command and TX paths; being static, their
 * definitions appear later in this translation unit.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* Completion states of a pending HCI request */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize request processing on a device via its req_lock mutex */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
/* ---- HCI debugfs entries ---- */

/* debugfs "dut_mode" read: 'Y' or 'N' depending on the HCI_DUT_MODE
 * debug flag.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" write: parse a boolean and enter Device Under Test
 * mode via HCI_OP_ENABLE_DUT_MODE, or leave it by issuing HCI_OP_RESET.
 * Requires the device to be up; -EALREADY if the state would not change.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	/* Send the command synchronously under the request lock; leaving
	 * DUT mode is done through a full controller reset.
	 */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command-complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only toggle the flag once the controller accepted the command */
	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Marcel Holtmann47219832013-10-17 17:24:15 -0700203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700210 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700211
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700218
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700219 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
/* debugfs "inquiry_cache": dump every cached inquiry result with its
 * page-scan parameters, device class, clock offset, RSSI, SSP mode and
 * the jiffies timestamp of the entry.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
273
/* debugfs "link_keys": dump the stored BR/EDR link keys. The list is
 * traversed under RCU, matching the list_*_rcu management of
 * hdev->link_keys.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
299
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700300static int dev_class_show(struct seq_file *f, void *ptr)
301{
302 struct hci_dev *hdev = f->private;
303
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
308
309 return 0;
310}
311
312static int dev_class_open(struct inode *inode, struct file *file)
313{
314 return single_open(file, dev_class_show, inode->i_private);
315}
316
317static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
322};
323
Marcel Holtmann041000b2013-10-17 12:02:31 -0700324static int voice_setting_get(void *data, u64 *val)
325{
326 struct hci_dev *hdev = data;
327
328 hci_dev_lock(hdev);
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
331
332 return 0;
333}
334
335DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
337
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700338static int auto_accept_delay_set(void *data, u64 val)
339{
340 struct hci_dev *hdev = data;
341
342 hci_dev_lock(hdev);
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
345
346 return 0;
347}
348
349static int auto_accept_delay_get(void *data, u64 *val)
350{
351 struct hci_dev *hdev = data;
352
353 hci_dev_lock(hdev);
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
356
357 return 0;
358}
359
360DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
362
/* debugfs "force_sc_support" read: 'Y' or 'N' from the HCI_FORCE_SC
 * debug flag.
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_sc_support" write: toggle the HCI_FORCE_SC debug flag.
 * Only allowed while the device is down (-EBUSY otherwise); -EALREADY
 * when the requested state matches the current one.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
408
/* debugfs "force_lesc_support" read: 'Y' or 'N' from the HCI_FORCE_LESC
 * debug flag.
 */
static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_lesc_support" write: toggle the HCI_FORCE_LESC debug
 * flag. Note there is no HCI_UP gate here (unlike force_sc_support),
 * so the flag can be flipped while the device is up.
 */
static ssize_t force_lesc_support_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_lesc_support_fops = {
	.open		= simple_open,
	.read		= force_lesc_support_read,
	.write		= force_lesc_support_write,
	.llseek		= default_llseek,
};
451
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800452static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
453 size_t count, loff_t *ppos)
454{
455 struct hci_dev *hdev = file->private_data;
456 char buf[3];
457
458 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
459 buf[1] = '\n';
460 buf[2] = '\0';
461 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
462}
463
464static const struct file_operations sc_only_mode_fops = {
465 .open = simple_open,
466 .read = sc_only_mode_read,
467 .llseek = default_llseek,
468};
469
/* debugfs "idle_timeout": 0 disables the timeout; otherwise the value
 * must lie in 500..3600000 (presumably milliseconds, i.e. up to one
 * hour — TODO confirm unit against the users of idle_timeout).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
497
/* debugfs "rpa_timeout": how long a Resolvable Private Address is used
 * before a new one is generated.
 */
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
528
/* debugfs "sniff_min_interval": must be non-zero, even, and not larger
 * than the current sniff_max_interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

/* debugfs "sniff_max_interval": must be non-zero, even, and not smaller
 * than the current sniff_min_interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
584
/* debugfs "conn_info_min_age": must be non-zero and not larger than
 * conn_info_max_age.
 */
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

/* debugfs "conn_info_max_age": must be non-zero and not smaller than
 * conn_info_min_age.
 */
static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
640
/* debugfs "identity": print the device identity address and its type,
 * followed by the local IRK and the current RPA.
 */
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
670
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800671static int random_address_show(struct seq_file *f, void *p)
672{
673 struct hci_dev *hdev = f->private;
674
675 hci_dev_lock(hdev);
676 seq_printf(f, "%pMR\n", &hdev->random_addr);
677 hci_dev_unlock(hdev);
678
679 return 0;
680}
681
682static int random_address_open(struct inode *inode, struct file *file)
683{
684 return single_open(file, random_address_show, inode->i_private);
685}
686
687static const struct file_operations random_address_fops = {
688 .open = random_address_open,
689 .read = seq_read,
690 .llseek = seq_lseek,
691 .release = single_release,
692};
693
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700694static int static_address_show(struct seq_file *f, void *p)
695{
696 struct hci_dev *hdev = f->private;
697
698 hci_dev_lock(hdev);
699 seq_printf(f, "%pMR\n", &hdev->static_addr);
700 hci_dev_unlock(hdev);
701
702 return 0;
703}
704
705static int static_address_open(struct inode *inode, struct file *file)
706{
707 return single_open(file, static_address_show, inode->i_private);
708}
709
710static const struct file_operations static_address_fops = {
711 .open = static_address_open,
712 .read = seq_read,
713 .llseek = seq_lseek,
714 .release = single_release,
715};
716
/* debugfs "force_static_address" read: 'Y' or 'N' from the
 * HCI_FORCE_STATIC_ADDR debug flag.
 */
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_static_address" write: toggle the flag. Only allowed
 * while the device is down (-EBUSY otherwise); -EALREADY when the
 * requested state matches the current one.
 */
static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
Marcel Holtmann92202182013-10-18 16:38:10 -0700763
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800764static int white_list_show(struct seq_file *f, void *ptr)
765{
766 struct hci_dev *hdev = f->private;
767 struct bdaddr_list *b;
768
769 hci_dev_lock(hdev);
770 list_for_each_entry(b, &hdev->le_white_list, list)
771 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
772 hci_dev_unlock(hdev);
773
774 return 0;
775}
776
777static int white_list_open(struct inode *inode, struct file *file)
778{
779 return single_open(file, white_list_show, inode->i_private);
780}
781
782static const struct file_operations white_list_fops = {
783 .open = white_list_open,
784 .read = seq_read,
785 .llseek = seq_lseek,
786 .release = single_release,
787};
788
/* debugfs "identity_resolving_keys": dump the stored IRKs. The list is
 * traversed under RCU, matching the list_*_rcu management of
 * hdev->identity_resolving_keys.
 */
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
817
/* debugfs "long_term_keys": dump the stored LE LTKs (address, type,
 * authentication, encryption size, EDIV, Rand and key value). Traversed
 * under RCU, matching the list_*_rcu management of hdev->long_term_keys.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
845
/* debugfs "conn_min_interval": LE connection interval minimum; must be
 * within 0x0006..0x0c80 and not exceed le_conn_max_interval.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

/* debugfs "conn_max_interval": LE connection interval maximum; must be
 * within 0x0006..0x0c80 and not fall below le_conn_min_interval.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
901
/* debugfs "conn_latency": LE connection latency; allowed range is
 * 0x0000..0x01f3.
 */
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
929
/* debugfs "supervision_timeout": LE link supervision timeout; allowed
 * range is 0x000a..0x0c80.
 */
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
957
/* debugfs "adv_channel_map": LE advertising channel map; valid values
 * are 0x01..0x07 (at least one channel bit must be set).
 */
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
985
/* debugfs "adv_min_interval": LE advertising interval minimum; must be
 * within 0x0020..0x4000 and not exceed le_adv_max_interval.
 */
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

/* debugfs "adv_max_interval": LE advertising interval maximum; must be
 * within 0x0020..0x4000 and not fall below le_adv_min_interval.
 */
static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001041
/* debugfs dump of the devices known to this controller: first the
 * whitelist entries, then the stored LE connection parameters with
 * their auto_connect value. Both lists are walked under hci_dev_lock.
 */
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

/* Bind device_list_show() to the seq_file single_open machinery. */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

/* Read-only debugfs "device_list" file operations. */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1071
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072/* ---- HCI requests ---- */
1073
/* Completion callback for synchronous HCI requests: store the result
 * and move the status to HCI_REQ_DONE before waking the task sleeping
 * in __hci_req_sync() or __hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
1084
/* Abort a pending synchronous request with @err. The waiter picks the
 * value up from req_result and negates it (see the HCI_REQ_CANCELED
 * case in __hci_req_sync()), so callers pass a positive errno.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
1095
Fengguang Wu77a63e02013-04-20 16:24:31 +03001096static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1097 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001098{
1099 struct hci_ev_cmd_complete *ev;
1100 struct hci_event_hdr *hdr;
1101 struct sk_buff *skb;
1102
1103 hci_dev_lock(hdev);
1104
1105 skb = hdev->recv_evt;
1106 hdev->recv_evt = NULL;
1107
1108 hci_dev_unlock(hdev);
1109
1110 if (!skb)
1111 return ERR_PTR(-ENODATA);
1112
1113 if (skb->len < sizeof(*hdr)) {
1114 BT_ERR("Too short HCI event");
1115 goto failed;
1116 }
1117
1118 hdr = (void *) skb->data;
1119 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1120
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001121 if (event) {
1122 if (hdr->evt != event)
1123 goto failed;
1124 return skb;
1125 }
1126
Johan Hedberg75e84b72013-04-02 13:35:04 +03001127 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1128 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1129 goto failed;
1130 }
1131
1132 if (skb->len < sizeof(*ev)) {
1133 BT_ERR("Too short cmd_complete event");
1134 goto failed;
1135 }
1136
1137 ev = (void *) skb->data;
1138 skb_pull(skb, sizeof(*ev));
1139
1140 if (opcode == __le16_to_cpu(ev->opcode))
1141 return skb;
1142
1143 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1144 __le16_to_cpu(ev->opcode));
1145
1146failed:
1147 kfree_skb(skb);
1148 return ERR_PTR(-ENODATA);
1149}
1150
/* Send a single HCI command and wait synchronously for its result.
 *
 * The calling task is added to hdev->req_wait_q *before* the request
 * is started so a completion arriving immediately cannot be missed.
 * @timeout is in jiffies (passed straight to schedule_timeout()).
 * On success the skb carrying the matching event is returned (see
 * hci_get_cmd_complete(); ownership passes to the caller). On failure
 * an ERR_PTR() is returned: -EINTR when a signal interrupted the
 * wait, -ETIMEDOUT when no completion arrived in time, or a negative
 * errno derived from the controller status.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		/* Undo the wait queue setup before bailing out */
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a kernel errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result was set by hci_req_cancel(); negate it */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1207
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * regular Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1214
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215/* Execute request and wait for completion. */
/* Build an HCI request via @func and wait for it to complete.
 *
 * Callers serialize through hci_req_lock(); see hci_req_sync(). The
 * task is registered on the wait queue before the request is started
 * so a fast completion cannot be missed. @timeout is in jiffies.
 * Returns 0 on success, -EINTR when interrupted by a signal,
 * -ETIMEDOUT when the controller did not answer in time, or a
 * negative errno derived from the controller status.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* Undo the wait queue setup before returning */
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a kernel errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result was set by hci_req_cancel(); negate it */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1281
Johan Hedberg01178cd2013-03-05 20:37:41 +02001282static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001283 void (*req)(struct hci_request *req,
1284 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001285 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286{
1287 int ret;
1288
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001289 if (!test_bit(HCI_UP, &hdev->flags))
1290 return -ENETDOWN;
1291
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 /* Serialize all requests */
1293 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001294 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 hci_req_unlock(hdev);
1296
1297 return ret;
1298}
1299
Johan Hedberg42c6b122013-03-05 20:37:49 +02001300static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001302 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
1304 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307}
1308
Johan Hedberg42c6b122013-03-05 20:37:49 +02001309static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001312
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001316 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001318
1319 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321}
1322
/* First-stage init for AMP controllers: select block based flow
 * control and read the controller's basic and AMP specific info.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1348
Johan Hedberg42c6b122013-03-05 20:37:49 +02001349static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001350{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001351 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001352
1353 BT_DBG("%s %ld", hdev->name, opt);
1354
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001355 /* Reset */
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001357 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001358
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001359 switch (hdev->dev_type) {
1360 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001361 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001362 break;
1363
1364 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001365 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001366 break;
1367
1368 default:
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1370 break;
1371 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001372}
1373
/* Stage-two setup for BR/EDR capable controllers: read the basic
 * BR/EDR state, clear event filters and set the connection accept
 * timeout. Page scan reads are skipped for controllers known not to
 * support them.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1415
/* Stage-two setup for LE capable controllers: read the LE specific
 * controller state and clear the white list.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1439
1440static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1441{
1442 if (lmp_ext_inq_capable(hdev))
1443 return 0x02;
1444
1445 if (lmp_inq_rssi_capable(hdev))
1446 return 0x01;
1447
1448 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1449 hdev->lmp_subver == 0x0757)
1450 return 0x01;
1451
1452 if (hdev->manufacturer == 15) {
1453 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1454 return 0x01;
1455 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1456 return 0x01;
1457 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1458 return 0x01;
1459 }
1460
1461 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1462 hdev->lmp_subver == 0x1805)
1463 return 0x01;
1464
1465 return 0x00;
1466}
1467
Johan Hedberg42c6b122013-03-05 20:37:49 +02001468static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001469{
1470 u8 mode;
1471
Johan Hedberg42c6b122013-03-05 20:37:49 +02001472 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001473
Johan Hedberg42c6b122013-03-05 20:37:49 +02001474 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001475}
1476
/* Program the HCI event mask (page 1) according to the controller's
 * capabilities: start from a transport specific default and then set
 * the individual event bits only for supported features.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1553
/* Stage two of controller initialization: transport specific setup
 * plus capability dependent configuration (SSP/EIR, inquiry mode,
 * extended features, link level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: make sure the EIR data is cleared */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1615
/* Write the default link policy assembled from the link-level
 * features (role switch, hold, sniff and park) that the controller
 * supports.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
1634
/* Sync the host's LE enabled state to the controller. The Write LE
 * Host Supported command is only queued when the desired state
 * differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1655
/* Program page 2 of the HCI event mask: CSB master/slave events and
 * the Authenticated Payload Timeout Expired event, each enabled only
 * when the controller supports the corresponding feature.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1687
/* Third stage of controller initialization, run once the supported
 * commands and features from the earlier stages are known.  Queues
 * event-mask setup, stored-link-key cleanup, link policy setup and
 * (for LE capable controllers) the LE event mask and related reads.
 *
 * @req: HCI request being built (commands are queued, not sent here)
 * @opt: unused request parameter
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Configure default link policy if the command is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f; /* enable default LE meta events (bits 0-3) */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1766
/* Fourth and final stage of controller initialization: optional
 * features gated on the command/feature bits read in earlier stages.
 *
 * @req: HCI request being built
 * @opt: unused request parameter
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1794
/* Run the staged controller initialization (init1..init4) and, during
 * the initial HCI_SETUP phase only, create the debugfs entries that
 * expose controller state.  AMP controllers only get the first stage.
 *
 * Returns 0 on success or the negative errno of the failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
		if (lmp_le_capable(hdev))
			debugfs_create_file("force_lesc_support", 0644,
					    hdev->debugfs, hdev,
					    &force_lesc_support_fops);
	}

	/* Sniff mode tunables */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries; also registers SMP for this controller */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1937
/* Minimal init sequence used for unconfigured controllers: reset the
 * controller (unless quirked out), read the version information and,
 * when the driver is able to program a BD_ADDR, read the current one.
 *
 * @req: HCI request being built
 * @opt: unused request parameter (logged only)
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1955
1956static int __hci_unconf_init(struct hci_dev *hdev)
1957{
1958 int err;
1959
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001960 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1961 return 0;
1962
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001963 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1964 if (err < 0)
1965 return err;
1966
1967 return 0;
1968}
1969
Johan Hedberg42c6b122013-03-05 20:37:49 +02001970static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971{
1972 __u8 scan = opt;
1973
Johan Hedberg42c6b122013-03-05 20:37:49 +02001974 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
1976 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001977 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978}
1979
Johan Hedberg42c6b122013-03-05 20:37:49 +02001980static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981{
1982 __u8 auth = opt;
1983
Johan Hedberg42c6b122013-03-05 20:37:49 +02001984 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001987 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988}
1989
Johan Hedberg42c6b122013-03-05 20:37:49 +02001990static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991{
1992 __u8 encrypt = opt;
1993
Johan Hedberg42c6b122013-03-05 20:37:49 +02001994 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001996 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001997 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998}
1999
Johan Hedberg42c6b122013-03-05 20:37:49 +02002000static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002001{
2002 __le16 policy = cpu_to_le16(opt);
2003
Johan Hedberg42c6b122013-03-05 20:37:49 +02002004 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002005
2006 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002007 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002008}
2009
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002010/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 * Device is held on return. */
2012struct hci_dev *hci_dev_get(int index)
2013{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002014 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015
2016 BT_DBG("%d", index);
2017
2018 if (index < 0)
2019 return NULL;
2020
2021 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002022 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 if (d->id == index) {
2024 hdev = hci_dev_hold(d);
2025 break;
2026 }
2027 }
2028 read_unlock(&hci_dev_list_lock);
2029 return hdev;
2030}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
2032/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002033
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002034bool hci_discovery_active(struct hci_dev *hdev)
2035{
2036 struct discovery_state *discov = &hdev->discovery;
2037
Andre Guedes6fbe1952012-02-03 17:47:58 -03002038 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002039 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002040 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002041 return true;
2042
Andre Guedes6fbe1952012-02-03 17:47:58 -03002043 default:
2044 return false;
2045 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002046}
2047
Johan Hedbergff9ef572012-01-04 14:23:45 +02002048void hci_discovery_set_state(struct hci_dev *hdev, int state)
2049{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002050 int old_state = hdev->discovery.state;
2051
Johan Hedbergff9ef572012-01-04 14:23:45 +02002052 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2053
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002054 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002055 return;
2056
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002057 hdev->discovery.state = state;
2058
Johan Hedbergff9ef572012-01-04 14:23:45 +02002059 switch (state) {
2060 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002061 hci_update_background_scan(hdev);
2062
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002063 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002064 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002065 break;
2066 case DISCOVERY_STARTING:
2067 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002068 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002069 mgmt_discovering(hdev, 1);
2070 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002071 case DISCOVERY_RESOLVING:
2072 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002073 case DISCOVERY_STOPPING:
2074 break;
2075 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002076}
2077
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002078void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079{
Johan Hedberg30883512012-01-04 14:16:21 +02002080 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002081 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
Johan Hedberg561aafb2012-01-04 13:31:59 +02002083 list_for_each_entry_safe(p, n, &cache->all, all) {
2084 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002085 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002087
2088 INIT_LIST_HEAD(&cache->unknown);
2089 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090}
2091
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002092struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2093 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Johan Hedberg30883512012-01-04 14:16:21 +02002095 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 struct inquiry_entry *e;
2097
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002098 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
Johan Hedberg561aafb2012-01-04 13:31:59 +02002100 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002102 return e;
2103 }
2104
2105 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106}
2107
Johan Hedberg561aafb2012-01-04 13:31:59 +02002108struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002109 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002110{
Johan Hedberg30883512012-01-04 14:16:21 +02002111 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002112 struct inquiry_entry *e;
2113
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002114 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002115
2116 list_for_each_entry(e, &cache->unknown, list) {
2117 if (!bacmp(&e->data.bdaddr, bdaddr))
2118 return e;
2119 }
2120
2121 return NULL;
2122}
2123
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002124struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002125 bdaddr_t *bdaddr,
2126 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002127{
2128 struct discovery_state *cache = &hdev->discovery;
2129 struct inquiry_entry *e;
2130
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002131 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002132
2133 list_for_each_entry(e, &cache->resolve, list) {
2134 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2135 return e;
2136 if (!bacmp(&e->data.bdaddr, bdaddr))
2137 return e;
2138 }
2139
2140 return NULL;
2141}
2142
/* Re-position @ie within the resolve list after its RSSI changed.
 *
 * The resolve list is kept sorted by increasing |RSSI| (i.e. strongest
 * signal first); entries already in NAME_PENDING state are not passed
 * over, so an in-flight name request keeps its place at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink, find the new insertion point, then re-insert */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2161
/* Add a new inquiry cache entry for @data or refresh an existing one.
 *
 * Returns MGMT_DEV_FOUND_* flags for the subsequent device-found
 * event: LEGACY_PAIRING when neither the fresh report nor the cached
 * entry indicates SSP support, CONFIRM_NAME when the remote name is
 * still unknown (or when a new entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change re-sorts an entry awaiting resolve */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown or
	 * resolve list (unless a name request is already pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2223
2224static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2225{
Johan Hedberg30883512012-01-04 14:16:21 +02002226 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 struct inquiry_info *info = (struct inquiry_info *) buf;
2228 struct inquiry_entry *e;
2229 int copied = 0;
2230
Johan Hedberg561aafb2012-01-04 13:31:59 +02002231 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002233
2234 if (copied >= num)
2235 break;
2236
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 bacpy(&info->bdaddr, &data->bdaddr);
2238 info->pscan_rep_mode = data->pscan_rep_mode;
2239 info->pscan_period_mode = data->pscan_period_mode;
2240 info->pscan_mode = data->pscan_mode;
2241 memcpy(info->dev_class, data->dev_class, 3);
2242 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002245 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 }
2247
2248 BT_DBG("cache %p, copied %d", cache, copied);
2249 return copied;
2250}
2251
Johan Hedberg42c6b122013-03-05 20:37:49 +02002252static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253{
2254 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002255 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 struct hci_cp_inquiry cp;
2257
2258 BT_DBG("%s", hdev->name);
2259
2260 if (test_bit(HCI_INQUIRY, &hdev->flags))
2261 return;
2262
2263 /* Start Inquiry */
2264 memcpy(&cp.lap, &ir->lap, 3);
2265 cp.length = ir->length;
2266 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002267 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268}
2269
2270int hci_inquiry(void __user *arg)
2271{
2272 __u8 __user *ptr = arg;
2273 struct hci_inquiry_req ir;
2274 struct hci_dev *hdev;
2275 int err = 0, do_inquiry = 0, max_rsp;
2276 long timeo;
2277 __u8 *buf;
2278
2279 if (copy_from_user(&ir, ptr, sizeof(ir)))
2280 return -EFAULT;
2281
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002282 hdev = hci_dev_get(ir.dev_id);
2283 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 return -ENODEV;
2285
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002286 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2287 err = -EBUSY;
2288 goto done;
2289 }
2290
Marcel Holtmann4a964402014-07-02 19:10:33 +02002291 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002292 err = -EOPNOTSUPP;
2293 goto done;
2294 }
2295
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002296 if (hdev->dev_type != HCI_BREDR) {
2297 err = -EOPNOTSUPP;
2298 goto done;
2299 }
2300
Johan Hedberg56f87902013-10-02 13:43:13 +03002301 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2302 err = -EOPNOTSUPP;
2303 goto done;
2304 }
2305
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002306 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002307 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002308 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002309 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 do_inquiry = 1;
2311 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002312 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Marcel Holtmann04837f62006-07-03 10:02:33 +02002314 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002315
2316 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002317 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2318 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002319 if (err < 0)
2320 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002321
2322 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2323 * cleared). If it is interrupted by a signal, return -EINTR.
2324 */
NeilBrown74316202014-07-07 15:16:04 +10002325 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002326 TASK_INTERRUPTIBLE))
2327 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002328 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002330 /* for unlimited number of responses we will use buffer with
2331 * 255 entries
2332 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2334
2335 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2336 * copy it to the user space.
2337 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002338 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002339 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 err = -ENOMEM;
2341 goto done;
2342 }
2343
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002344 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002346 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
2348 BT_DBG("num_rsp %d", ir.num_rsp);
2349
2350 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2351 ptr += sizeof(ir);
2352 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002353 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002355 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 err = -EFAULT;
2357
2358 kfree(buf);
2359
2360done:
2361 hci_dev_put(hdev);
2362 return err;
2363}
2364
/* Bring the controller up: call the driver's open callback, run the
 * setup/configuration stages and the HCI init sequence, and announce
 * HCI_DEV_UP on success.
 *
 * Returns 0 on success or a negative errno. On init failure the driver
 * is closed again and all queues are purged, so the device is left in
 * a consistent "down" state.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	/* Serialize against other open/close/reset requests */
	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Transport driver open callback; non-zero means the hardware
	 * could not be reached.
	 */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Allow exactly one outstanding HCI command during init */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full HCI init is skipped for unconfigured devices and
		 * for user channel, where userspace drives the device.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only notify mgmt of the power state for regular,
		 * fully configured BR/EDR controllers.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only HCI_RAW across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2508
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP ioctl handler: power on the device identified by its index.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2565
Johan Hedbergd7347f32014-07-04 12:37:23 +03002566/* This function requires the caller holds hdev->lock */
2567static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2568{
2569 struct hci_conn_params *p;
2570
Johan Hedbergf161dd42014-08-15 21:06:54 +03002571 list_for_each_entry(p, &hdev->le_conn_params, list) {
2572 if (p->conn) {
2573 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002574 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002575 p->conn = NULL;
2576 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002577 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002578 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002579
2580 BT_DBG("All LE pending actions cleared");
2581}
2582
/* Bring the controller down: cancel pending work, flush all queues and
 * workqueues, optionally issue an HCI Reset, call the driver's close
 * callback and clear all non-persistent state.
 *
 * Always returns 0. Safe to call on a device that is already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* An explicit close supersedes a pending deferred power-off */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down; just make sure the command timer is gone */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* RPA rotation only runs when mgmt is in use */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags; only HCI_RAW survives a close */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device was opened */
	hci_dev_put(hdev);
	return 0;
}
2690
2691int hci_dev_close(__u16 dev)
2692{
2693 struct hci_dev *hdev;
2694 int err;
2695
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002696 hdev = hci_dev_get(dev);
2697 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002699
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002700 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2701 err = -EBUSY;
2702 goto done;
2703 }
2704
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002705 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2706 cancel_delayed_work(&hdev->power_off);
2707
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002709
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002710done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 hci_dev_put(hdev);
2712 return err;
2713}
2714
/* HCIDEVRESET ioctl handler: flush all state of a running device and
 * issue an HCI Reset. Fails if the device is down (-ENETDOWN), owned
 * by a user channel (-EBUSY) or unconfigured (-EOPNOTSUPP).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against open/close and other requests */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters before sending the reset command */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2768
2769int hci_dev_reset_stat(__u16 dev)
2770{
2771 struct hci_dev *hdev;
2772 int ret = 0;
2773
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002774 hdev = hci_dev_get(dev);
2775 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 return -ENODEV;
2777
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002778 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2779 ret = -EBUSY;
2780 goto done;
2781 }
2782
Marcel Holtmann4a964402014-07-02 19:10:33 +02002783 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002784 ret = -EOPNOTSUPP;
2785 goto done;
2786 }
2787
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2789
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002790done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 return ret;
2793}
2794
Johan Hedberg123abc02014-07-10 12:09:07 +03002795static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2796{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002797 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002798
2799 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2800
2801 if ((scan & SCAN_PAGE))
2802 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2803 &hdev->dev_flags);
2804 else
2805 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2806 &hdev->dev_flags);
2807
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002808 if ((scan & SCAN_INQUIRY)) {
2809 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2810 &hdev->dev_flags);
2811 } else {
2812 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2813 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2814 &hdev->dev_flags);
2815 }
2816
Johan Hedberg123abc02014-07-10 12:09:07 +03002817 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2818 return;
2819
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002820 if (conn_changed || discov_changed) {
2821 /* In case this was disabled through mgmt */
2822 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2823
2824 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2825 mgmt_update_adv_data(hdev);
2826
Johan Hedberg123abc02014-07-10 12:09:07 +03002827 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002828 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002829}
2830
/* Handler for the legacy per-device HCI ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...). Copies a struct hci_dev_req from userspace and
 * applies the requested setting. Only supported for powered BR/EDR
 * devices that are not owned by a user channel and not unconfigured.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: the second is the
		 * MTU, the first the packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2932
2933int hci_get_dev_list(void __user *arg)
2934{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002935 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 struct hci_dev_list_req *dl;
2937 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 int n = 0, size, err;
2939 __u16 dev_num;
2940
2941 if (get_user(dev_num, (__u16 __user *) arg))
2942 return -EFAULT;
2943
2944 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2945 return -EINVAL;
2946
2947 size = sizeof(*dl) + dev_num * sizeof(*dr);
2948
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002949 dl = kzalloc(size, GFP_KERNEL);
2950 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 return -ENOMEM;
2952
2953 dr = dl->dev_req;
2954
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002955 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002956 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002957 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002958
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002959 /* When the auto-off is configured it means the transport
2960 * is running, but in that case still indicate that the
2961 * device is actually down.
2962 */
2963 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2964 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002965
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002967 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002968
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 if (++n >= dev_num)
2970 break;
2971 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002972 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
2974 dl->dev_num = n;
2975 size = sizeof(*dl) + n * sizeof(*dr);
2976
2977 err = copy_to_user(arg, dl, size);
2978 kfree(dl);
2979
2980 return err ? -EFAULT : 0;
2981}
2982
2983int hci_get_dev_info(void __user *arg)
2984{
2985 struct hci_dev *hdev;
2986 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002987 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 int err = 0;
2989
2990 if (copy_from_user(&di, arg, sizeof(di)))
2991 return -EFAULT;
2992
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002993 hdev = hci_dev_get(di.dev_id);
2994 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 return -ENODEV;
2996
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002997 /* When the auto-off is configured it means the transport
2998 * is running, but in that case still indicate that the
2999 * device is actually down.
3000 */
3001 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3002 flags = hdev->flags & ~BIT(HCI_UP);
3003 else
3004 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02003005
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 strcpy(di.name, hdev->name);
3007 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07003008 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003009 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03003011 if (lmp_bredr_capable(hdev)) {
3012 di.acl_mtu = hdev->acl_mtu;
3013 di.acl_pkts = hdev->acl_pkts;
3014 di.sco_mtu = hdev->sco_mtu;
3015 di.sco_pkts = hdev->sco_pkts;
3016 } else {
3017 di.acl_mtu = hdev->le_mtu;
3018 di.acl_pkts = hdev->le_pkts;
3019 di.sco_mtu = 0;
3020 di.sco_pkts = 0;
3021 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 di.link_policy = hdev->link_policy;
3023 di.link_mode = hdev->link_mode;
3024
3025 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3026 memcpy(&di.features, &hdev->features, sizeof(di.features));
3027
3028 if (copy_to_user(arg, &di, sizeof(di)))
3029 err = -EFAULT;
3030
3031 hci_dev_put(hdev);
3032
3033 return err;
3034}
3035
3036/* ---- Interface to HCI drivers ---- */
3037
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003038static int hci_rfkill_set_block(void *data, bool blocked)
3039{
3040 struct hci_dev *hdev = data;
3041
3042 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3043
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003044 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3045 return -EBUSY;
3046
Johan Hedberg5e130362013-09-13 08:58:17 +03003047 if (blocked) {
3048 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003049 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3050 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003051 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003052 } else {
3053 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003054 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003055
3056 return 0;
3057}
3058
/* rfkill operations for the Bluetooth controller radio switch */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3062
/* Work item that powers on a controller (queued e.g. from mgmt). Runs
 * hci_dev_do_open, re-checks the error conditions that the setup stage
 * ignored, arms the auto-off timer if requested and emits the proper
 * mgmt index events for the SETUP -> configured/unconfigured and
 * CONFIG -> configured transitions.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3121
/* Delayed work item that powers the controller off, used e.g. for the
 * auto-off timeout armed in hci_power_on.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
3131
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003132static void hci_discov_off(struct work_struct *work)
3133{
3134 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003135
3136 hdev = container_of(work, struct hci_dev, discov_off.work);
3137
3138 BT_DBG("%s", hdev->name);
3139
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003140 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003141}
3142
Johan Hedberg35f74982014-02-18 17:14:32 +02003143void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003144{
Johan Hedberg48210022013-01-27 00:31:28 +02003145 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003146
Johan Hedberg48210022013-01-27 00:31:28 +02003147 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3148 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003149 kfree(uuid);
3150 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003151}
3152
/* Remove and free all stored BR/EDR link keys of @hdev.
 *
 * Entries are unlinked with list_del_rcu() and released via kfree_rcu()
 * so that concurrent RCU readers (e.g. hci_find_link_key()) never touch
 * freed memory.
 *
 * NOTE(review): presumably runs with hdev->lock held so no keys are
 * added concurrently -- confirm at the call sites.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
3162
/* Remove and free all stored LE long term keys of @hdev.
 *
 * Uses list_del_rcu()/kfree_rcu() so concurrent RCU readers
 * (e.g. hci_find_ltk()) stay safe.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3172
/* Remove and free all stored Identity Resolving Keys of @hdev.
 *
 * Uses list_del_rcu()/kfree_rcu() so concurrent RCU readers
 * (e.g. hci_find_irk_by_rpa()) stay safe.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3182
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003183struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3184{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003185 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003186
Johan Hedberg0378b592014-11-19 15:22:22 +02003187 rcu_read_lock();
3188 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3189 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3190 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003191 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003192 }
3193 }
3194 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003195
3196 return NULL;
3197}
3198
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303199static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003200 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003201{
3202 /* Legacy key */
3203 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303204 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003205
3206 /* Debug keys are insecure so don't store them persistently */
3207 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303208 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003209
3210 /* Changed combination key and there's no previous one */
3211 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303212 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003213
3214 /* Security mode 3 case */
3215 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303216 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003217
Johan Hedberge3befab2014-06-01 16:33:39 +03003218 /* BR/EDR key derived using SC from an LE link */
3219 if (conn->type == LE_LINK)
3220 return true;
3221
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003222 /* Neither local nor remote side had no-bonding as requirement */
3223 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303224 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003225
3226 /* Local side had dedicated bonding as requirement */
3227 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303228 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003229
3230 /* Remote side had dedicated bonding as requirement */
3231 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303232 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003233
3234 /* If none of the above criteria match, then don't store the key
3235 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303236 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003237}
3238
Johan Hedberge804d252014-07-16 11:42:28 +03003239static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003240{
Johan Hedberge804d252014-07-16 11:42:28 +03003241 if (type == SMP_LTK)
3242 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003243
Johan Hedberge804d252014-07-16 11:42:28 +03003244 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003245}
3246
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003247struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3248 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003249{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003250 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003251
Johan Hedberg970d0f12014-11-13 14:37:47 +02003252 rcu_read_lock();
3253 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003254 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3255 continue;
3256
Johan Hedberg923e2412014-12-03 12:43:39 +02003257 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003258 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003259 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003260 }
3261 }
3262 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003263
3264 return NULL;
3265}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003266
/* Resolve a Resolvable Private Address to its IRK entry.
 *
 * First pass: look for an entry whose cached RPA already equals @rpa
 * (cheap comparison).  Second pass: run the more expensive
 * smp_irk_matches() computation against every stored IRK value; on a
 * hit the entry's cached RPA is updated so the next lookup takes the
 * fast path.
 *
 * The whole walk happens under rcu_read_lock(); the returned pointer
 * follows RCU lifetime rules.  Returns NULL when no IRK resolves @rpa.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3290
3291struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3292 u8 addr_type)
3293{
3294 struct smp_irk *irk;
3295
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003296 /* Identity Address must be public or static random */
3297 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3298 return NULL;
3299
Johan Hedbergadae20c2014-11-13 14:37:48 +02003300 rcu_read_lock();
3301 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003302 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003303 bacmp(bdaddr, &irk->bdaddr) == 0) {
3304 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003305 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003306 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003307 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003308 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003309
3310 return NULL;
3311}
3312
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * If an entry already exists it is reused in place; otherwise a new one
 * is allocated and published on the RCU-protected list.  When
 * @persistent is non-NULL it is set to whether the key should be kept
 * across reboots (see hci_persistent_key()).
 *
 * Returns the stored entry, or NULL on allocation failure.
 *
 * NOTE(review): presumably called with hdev->lock held -- confirm at
 * the call sites.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3359
Johan Hedbergca9142b2014-02-19 14:57:44 +02003360struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003361 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003362 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003363{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003364 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003365 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003366
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003367 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003368 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003369 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003370 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003371 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003372 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003373 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003374 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003375 }
3376
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003377 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003378 key->bdaddr_type = addr_type;
3379 memcpy(key->val, tk, sizeof(key->val));
3380 key->authenticated = authenticated;
3381 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003382 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003383 key->enc_size = enc_size;
3384 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003385
Johan Hedbergca9142b2014-02-19 14:57:44 +02003386 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003387}
3388
/* Store (or refresh) an Identity Resolving Key for the identity address
 * @bdaddr/@addr_type.
 *
 * If no entry exists yet a new one is allocated and published on the
 * RCU-protected list; in both cases the 16-byte key value and the
 * currently known RPA are (re)written.  Returns the entry, or NULL on
 * allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
3411
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003412int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3413{
3414 struct link_key *key;
3415
3416 key = hci_find_link_key(hdev, bdaddr);
3417 if (!key)
3418 return -ENOENT;
3419
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003420 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003421
Johan Hedberg0378b592014-11-19 15:22:22 +02003422 list_del_rcu(&key->list);
3423 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003424
3425 return 0;
3426}
3427
/* Delete all LE long term keys matching @bdaddr/@bdaddr_type.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu()
 * so concurrent RCU readers stay safe.  Returns 0 when at least one
 * key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
3446
/* Delete all Identity Resolving Keys matching @bdaddr/@addr_type.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu()
 * so concurrent RCU readers stay safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3461
Ville Tervo6bd32322011-02-16 16:32:41 +02003462/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003463static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003464{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003465 struct hci_dev *hdev = container_of(work, struct hci_dev,
3466 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003467
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003468 if (hdev->sent_cmd) {
3469 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3470 u16 opcode = __le16_to_cpu(sent->opcode);
3471
3472 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3473 } else {
3474 BT_ERR("%s command tx timeout", hdev->name);
3475 }
3476
Ville Tervo6bd32322011-02-16 16:32:41 +02003477 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003478 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003479}
3480
Szymon Janc2763eda2011-03-22 13:12:22 +01003481struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003482 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003483{
3484 struct oob_data *data;
3485
Johan Hedberg6928a922014-10-26 20:46:09 +01003486 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3487 if (bacmp(bdaddr, &data->bdaddr) != 0)
3488 continue;
3489 if (data->bdaddr_type != bdaddr_type)
3490 continue;
3491 return data;
3492 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003493
3494 return NULL;
3495}
3496
Johan Hedberg6928a922014-10-26 20:46:09 +01003497int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3498 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003499{
3500 struct oob_data *data;
3501
Johan Hedberg6928a922014-10-26 20:46:09 +01003502 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003503 if (!data)
3504 return -ENOENT;
3505
Johan Hedberg6928a922014-10-26 20:46:09 +01003506 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003507
3508 list_del(&data->list);
3509 kfree(data);
3510
3511 return 0;
3512}
3513
Johan Hedberg35f74982014-02-18 17:14:32 +02003514void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003515{
3516 struct oob_data *data, *n;
3517
3518 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3519 list_del(&data->list);
3520 kfree(data);
3521 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003522}
3523
/* Store remote OOB pairing data (P-192 and/or P-256 hash+randomizer)
 * for @bdaddr/@bdaddr_type, creating the entry on first use.
 *
 * A NULL hash/randomizer pair zeroes the corresponding fields, which
 * marks that variant as not available.  Returns 0, or -ENOMEM on
 * allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		/* kmalloc is fine: every field is written below */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3561
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003562struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003563 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003564{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003565 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003566
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003567 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003568 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003569 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003570 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003571
3572 return NULL;
3573}
3574
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003575void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003576{
3577 struct list_head *p, *n;
3578
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003579 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003580 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003581
3582 list_del(p);
3583 kfree(b);
3584 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003585}
3586
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003587int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003588{
3589 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003590
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003591 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003592 return -EBADF;
3593
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003594 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003595 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003596
Johan Hedberg27f70f32014-07-21 10:50:06 +03003597 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003598 if (!entry)
3599 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003600
3601 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003602 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003603
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003604 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003605
3606 return 0;
3607}
3608
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003609int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003610{
3611 struct bdaddr_list *entry;
3612
Johan Hedberg35f74982014-02-18 17:14:32 +02003613 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003614 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003615 return 0;
3616 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003617
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003618 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003619 if (!entry)
3620 return -ENOENT;
3621
3622 list_del(&entry->list);
3623 kfree(entry);
3624
3625 return 0;
3626}
3627
Andre Guedes15819a72014-02-03 13:56:18 -03003628/* This function requires the caller holds hdev->lock */
3629struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3630 bdaddr_t *addr, u8 addr_type)
3631{
3632 struct hci_conn_params *params;
3633
Johan Hedberg738f6182014-07-03 19:33:51 +03003634 /* The conn params list only contains identity addresses */
3635 if (!hci_is_identity_address(addr, addr_type))
3636 return NULL;
3637
Andre Guedes15819a72014-02-03 13:56:18 -03003638 list_for_each_entry(params, &hdev->le_conn_params, list) {
3639 if (bacmp(&params->addr, addr) == 0 &&
3640 params->addr_type == addr_type) {
3641 return params;
3642 }
3643 }
3644
3645 return NULL;
3646}
3647
Andre Guedescef952c2014-02-26 20:21:49 -03003648static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3649{
3650 struct hci_conn *conn;
3651
3652 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3653 if (!conn)
3654 return false;
3655
3656 if (conn->dst_type != type)
3657 return false;
3658
3659 if (conn->state != BT_CONNECTED)
3660 return false;
3661
3662 return true;
3663}
3664
Andre Guedes15819a72014-02-03 13:56:18 -03003665/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003666struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3667 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003668{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003669 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003670
Johan Hedberg738f6182014-07-03 19:33:51 +03003671 /* The list only contains identity addresses */
3672 if (!hci_is_identity_address(addr, addr_type))
3673 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003674
Johan Hedberg501f8822014-07-04 12:37:26 +03003675 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003676 if (bacmp(&param->addr, addr) == 0 &&
3677 param->addr_type == addr_type)
3678 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003679 }
3680
3681 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003682}
3683
/* This function requires the caller holds hdev->lock */
/* Look up the LE connection parameters for @addr/@addr_type, creating
 * a new entry with the controller's default values when none exists.
 * Returns the entry, or NULL for invalid identity addresses or on
 * allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	/* Reuse an existing entry when present */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed with the device-wide defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3719
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for @addr/@addr_type, creating the conn
 * params entry if needed, and re-home the entry on the matching
 * pend_le_reports/pend_le_conns action list.  Returns 0 on success or
 * -EIO when the entry could not be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection when none exists yet */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3760
Johan Hedbergf6c63242014-08-15 21:06:59 +03003761static void hci_conn_params_free(struct hci_conn_params *params)
3762{
3763 if (params->conn) {
3764 hci_conn_drop(params->conn);
3765 hci_conn_put(params->conn);
3766 }
3767
3768 list_del(&params->action);
3769 list_del(&params->list);
3770 kfree(params);
3771}
3772
Andre Guedes15819a72014-02-03 13:56:18 -03003773/* This function requires the caller holds hdev->lock */
3774void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3775{
3776 struct hci_conn_params *params;
3777
3778 params = hci_conn_params_lookup(hdev, addr, addr_type);
3779 if (!params)
3780 return;
3781
Johan Hedbergf6c63242014-08-15 21:06:59 +03003782 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003783
Johan Hedberg95305ba2014-07-04 12:37:21 +03003784 hci_update_background_scan(hdev);
3785
Andre Guedes15819a72014-02-03 13:56:18 -03003786 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3787}
3788
/* Remove every connection parameter entry whose auto-connect policy is
 * HCI_AUTO_CONN_DISABLED, leaving all other entries untouched.
 *
 * This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* _safe iteration because entries are deleted while walking */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		/* NOTE(review): unlike hci_conn_params_free(), this frees
		 * with plain kfree() — assumes disabled entries hold no
		 * params->conn reference and are not linked on an action
		 * list; confirm against the places that set
		 * HCI_AUTO_CONN_DISABLED.
		 */
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3803
3804/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003805void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003806{
3807 struct hci_conn_params *params, *tmp;
3808
Johan Hedbergf6c63242014-08-15 21:06:59 +03003809 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3810 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003811
Johan Hedberga2f41a82014-07-04 12:37:19 +03003812 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003813
Andre Guedes15819a72014-02-03 13:56:18 -03003814 BT_DBG("All LE connection parameters were removed");
3815}
3816
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003817static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003818{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003819 if (status) {
3820 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003821
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003822 hci_dev_lock(hdev);
3823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3824 hci_dev_unlock(hdev);
3825 return;
3826 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003827}
3828
/* Completion callback for the LE-scan-disable request. Depending on the
 * discovery type this either marks discovery as stopped (LE-only) or
 * continues an interleaved discovery by kicking off a BR/EDR inquiry.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning is off, so discovery is done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase finished; build and submit the inquiry for the
		 * BR/EDR phase of interleaved discovery.
		 */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the inquiry with a clean result cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3871
/* Delayed work that turns off LE scanning after the discovery timeout.
 * The actual follow-up (stopping discovery or starting the inquiry
 * phase) happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3889
/* Queue an HCI command to set the controller's random address, unless
 * doing so right now would leave an in-flight operation with an
 * ambiguous own-address; in that case the update is deferred by
 * re-arming the HCI_RPA_EXPIRED flag.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		/* Mark the RPA as expired so the next cycle retries */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3913
/* Decide which own-address type to use for an LE operation and, when a
 * random address is needed, append the command to set it to @req.
 *
 * Selection order: resolvable private address (privacy enabled) ->
 * unresolvable private address (@require_privacy) -> static random
 * address (forced, or no public address) -> public address.
 *
 * Returns 0 on success with *own_addr_type filled in, or a negative
 * error if RPA generation fails.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Keep the current RPA if it has not expired and is
		 * already programmed as the controller's random address.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule expiry of the freshly generated RPA */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3983
Johan Hedberga1f4c312014-02-27 14:05:41 +02003984/* Copy the Identity Address of the controller.
3985 *
3986 * If the controller has a public BD_ADDR, then by default use that one.
3987 * If this is a LE only controller without a public address, default to
3988 * the static random address.
3989 *
3990 * For debugging purposes it is possible to force controllers with a
3991 * public address to use the static random address instead.
3992 */
3993void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3994 u8 *bdaddr_type)
3995{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003996 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003997 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3998 bacpy(bdaddr, &hdev->static_addr);
3999 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4000 } else {
4001 bacpy(bdaddr, &hdev->bdaddr);
4002 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4003 }
4004}
4005
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and fills in protocol defaults,
 * list heads, work items and queues. Driver callbacks (open/close/send)
 * and registration are left to the caller / hci_register_dev().
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR packet/link defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff-mode interval defaults (in slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection parameter defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, pending LE work, ...) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving RX/TX/command processing and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4082
/* Free HCI device.
 *
 * Drops the sysfs device reference; the actual freeing happens in the
 * device release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4090
/* Register HCI device.
 *
 * Validates the driver callbacks, allocates a device index, creates the
 * work queues, sysfs/debugfs entries and rfkill switch, adds the device
 * to the global list and queues the initial power-on. Returns the
 * assigned index on success or a negative error; on error, all
 * resources acquired up to the failure point are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* The driver must provide the minimal transport callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered (max_active = 1) high-priority queues for RX/TX/command
	 * processing and for serialized request work.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failures are tolerated and
	 * simply leave hdev->rfkill as NULL.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4194
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, tears down work items, rfkill, sysfs/debugfs, clears all
 * per-device lists and drops the registration reference. The index is
 * released last so it cannot be reused while teardown is in progress.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevent new operations from starting during teardown */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if the device had completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear all per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4263
/* Suspend HCI device.
 *
 * Only notifies registered listeners (HCI_DEV_SUSPEND); no controller
 * state is changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4271
/* Resume HCI device.
 *
 * Only notifies registered listeners (HCI_DEV_RESUME); no controller
 * state is changed here. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4279
Marcel Holtmann75e05692014-11-02 08:15:38 +01004280/* Reset HCI device */
4281int hci_reset_dev(struct hci_dev *hdev)
4282{
4283 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4284 struct sk_buff *skb;
4285
4286 skb = bt_skb_alloc(3, GFP_ATOMIC);
4287 if (!skb)
4288 return -ENOMEM;
4289
4290 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4291 memcpy(skb_put(skb, 3), hw_err, 3);
4292
4293 /* Send Hardware Error to upper stack */
4294 return hci_recv_frame(hdev, skb);
4295}
4296EXPORT_SYMBOL(hci_reset_dev);
4297
Marcel Holtmann76bca882009-11-18 00:40:39 +01004298/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004299int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004300{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004301 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004302 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004303 kfree_skb(skb);
4304 return -ENXIO;
4305 }
4306
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004307 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004308 bt_cb(skb)->incoming = 1;
4309
4310 /* Time stamp */
4311 __net_timestamp(skb);
4312
Marcel Holtmann76bca882009-11-18 00:40:39 +01004313 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004314 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004315
Marcel Holtmann76bca882009-11-18 00:40:39 +01004316 return 0;
4317}
4318EXPORT_SYMBOL(hci_recv_frame);
4319
/* Reassemble a (possibly partial) HCI packet from driver-provided bytes.
 *
 * Consumes up to @count bytes of @data into the per-index reassembly
 * buffer for the given packet @type. Once a packet's declared payload
 * length has been satisfied the complete skb is handed to
 * hci_recv_frame() and the buffer slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * declared payload would not fit the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer sized for the
		 * largest packet of this type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits the allocated buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4427
/* Reassembly slot used for byte-stream transports (e.g. UART), where
 * each packet is preceded by a one-byte packet-type indicator.
 */
#define STREAM_REASSEMBLY 0

/* Feed a chunk of an HCI byte stream into reassembly.
 *
 * When no packet is in progress, the first byte of @data is consumed as
 * the packet type; remaining bytes are passed to hci_reassembly() until
 * the chunk is exhausted. Returns the leftover byte count from the last
 * hci_reassembly() call, or a negative error from it.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4462
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463/* ---- Interface to upper protocols ---- */
4464
/* Register an upper-protocol callback structure on the global HCI
 * callback list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4476
/* Remove an upper-protocol callback structure from the global HCI
 * callback list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4488
/* Hand a fully built frame to the driver.
 *
 * The frame is time stamped, mirrored to the monitor channel (and to
 * promiscuous sockets), detached from its owning socket and then passed
 * to the driver's send callback; on driver failure the skb is freed
 * here.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
4515
Johan Hedberg3119ae92013-03-05 20:37:44 +02004516void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4517{
4518 skb_queue_head_init(&req->cmd_q);
4519 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004520 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004521}
4522
/* Submit a built HCI request for execution.
 *
 * Attaches @complete to the last queued command, splices the request's
 * commands onto the device command queue and kicks the command worker.
 * Returns 0 on success, the recorded build error (purging the queue),
 * or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback fires after the final command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request's commands stay contiguous on
	 * the device command queue.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4554
Marcel Holtmann899de762014-07-11 05:51:58 +02004555bool hci_req_pending(struct hci_dev *hdev)
4556{
4557 return (hdev->req_status == HCI_REQ_PEND);
4558}
4559
/* Allocate and fill an skb holding a single HCI command packet: the
 * command header (little-endian opcode plus parameter length) followed
 * by @plen bytes copied from @param. Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	/* GFP_ATOMIC: callers may be in non-sleeping context */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	/* Record packet type and the host-endian opcode in the skb
	 * control buffer.
	 */
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	bt_cb(skb)->opcode = opcode;

	return skb;
}
4585
4586/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004587int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4588 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004589{
4590 struct sk_buff *skb;
4591
4592 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4593
4594 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4595 if (!skb) {
4596 BT_ERR("%s no memory for command", hdev->name);
4597 return -ENOMEM;
4598 }
4599
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004600 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004601 * single-command requests.
4602 */
4603 bt_cb(skb)->req.start = true;
4604
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004606 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607
4608 return 0;
4609}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		/* Record the failure in the request; hci_req_run() will
		 * purge the queue and report it.
		 */
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	/* Stash the event the caller associates with this command
	 * (0 means none).
	 */
	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4641
/* Queue a command to an asynchronous HCI request without associating a
 * specific event (thin wrapper around hci_req_add_ev() with event 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4647
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004649void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650{
4651 struct hci_command_hdr *hdr;
4652
4653 if (!hdev->sent_cmd)
4654 return NULL;
4655
4656 hdr = (void *) hdev->sent_cmd->data;
4657
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004658 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 return NULL;
4660
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004661 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662
4663 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4664}
4665
/* Send ACL data */
/* Prepend an ACL data header (packed handle+flags, payload length) to
 * @skb. The payload length is captured before skb_push() so dlen does
 * not include the header itself.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4678
/* Add ACL headers to @skb (and each fragment on its frag_list) and put
 * everything on @queue. For BR/EDR the connection handle is used, for
 * AMP the channel handle. Fragments after the first are marked ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments travel
	 * separately via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of
		 * ACL_START.
		 */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4740
4741void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4742{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004743 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004744
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004745 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004746
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004747 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004749 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751
/* Send SCO data */
/* Prepend a SCO header to @skb, queue it on the connection's data queue
 * and schedule the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* dlen is taken before the header is pushed, so it covers the
	 * payload only.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772
4773/* ---- HCI TX task (outgoing data) ---- */
4774
/* HCI Connection scheduler */
/* Pick the connection of @type that has queued data and the fewest
 * packets in flight. *quote is set to that connection's fair share of
 * the free controller buffers (at least 1), or 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4835
/* Link TX timeout handling: disconnect every connection of @type that
 * still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4856
/* Channel-level scheduler: among all channels of connections of @type,
 * pick the one whose head skb has the highest priority, breaking ties
 * in favour of the least-busy connection. *quote is set to the fair
 * share of free controller buffers (at least 1). Returns NULL when no
 * channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority restarts the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities prefer the connection
			 * with the fewest packets in flight.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer pool depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4938
/* After a scheduling round, reset the per-channel sent counters of
 * channels that transmitted, and promote the head skb of channels that
 * were starved to HCI_PRIO_MAX - 1 so they get served next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: clear counter
			 * and leave its priorities alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4988
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004989static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4990{
4991 /* Calculate count of blocks used by this packet */
4992 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4993}
4994
/* If the controller buffers have been exhausted (@cnt == 0) for longer
 * than HCI_ACL_TX_TIMEOUT, assume stalled ACL links and tear them down.
 * Skipped while the controller is still unconfigured.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005
/* Packet-based ACL scheduler: drain channel queues into the driver
 * while controller buffers remain, up to each channel's fair quote,
 * then rebalance priorities if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
5043
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005044static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005045{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005046 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005047 struct hci_chan *chan;
5048 struct sk_buff *skb;
5049 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005050 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005051
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005052 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005053
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005054 BT_DBG("%s", hdev->name);
5055
5056 if (hdev->dev_type == HCI_AMP)
5057 type = AMP_LINK;
5058 else
5059 type = ACL_LINK;
5060
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005061 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005062 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005063 u32 priority = (skb_peek(&chan->data_q))->priority;
5064 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5065 int blocks;
5066
5067 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005068 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005069
5070 /* Stop if priority has changed */
5071 if (skb->priority < priority)
5072 break;
5073
5074 skb = skb_dequeue(&chan->data_q);
5075
5076 blocks = __get_blocks(hdev, skb);
5077 if (blocks > hdev->block_cnt)
5078 return;
5079
5080 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005081 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005082
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005083 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005084 hdev->acl_last_tx = jiffies;
5085
5086 hdev->block_cnt -= blocks;
5087 quote -= blocks;
5088
5089 chan->sent += blocks;
5090 chan->conn->sent += blocks;
5091 }
5092 }
5093
5094 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005095 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005096}
5097
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005098static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005099{
5100 BT_DBG("%s", hdev->name);
5101
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005102 /* No ACL link over BR/EDR controller */
5103 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5104 return;
5105
5106 /* No AMP link over AMP controller */
5107 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005108 return;
5109
5110 switch (hdev->flow_ctl_mode) {
5111 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5112 hci_sched_acl_pkt(hdev);
5113 break;
5114
5115 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5116 hci_sched_acl_blk(hdev);
5117 break;
5118 }
5119}
5120
/* Schedule SCO */
/* Send queued SCO data for the least-busy SCO connections while
 * controller SCO buffers remain.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the sent counter instead of saturating */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
5144
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005145static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005146{
5147 struct hci_conn *conn;
5148 struct sk_buff *skb;
5149 int quote;
5150
5151 BT_DBG("%s", hdev->name);
5152
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005153 if (!hci_conn_num(hdev, ESCO_LINK))
5154 return;
5155
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005156 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5157 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005158 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5159 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005160 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005161
5162 conn->sent++;
5163 if (conn->sent == ~0)
5164 conn->sent = 0;
5165 }
5166 }
5167}
5168
/* LE scheduler: drain LE channel queues while buffers remain. When the
 * controller has no dedicated LE buffers (le_pkts == 0), the ACL pool
 * is shared and updated instead.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL buffer pool when there is no LE pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: give starved channels a boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5219
/* TX work item: run all per-link-type schedulers (unless the device is
 * in user channel mode) and flush any raw packets queued for the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5240
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005241/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242
/* ACL data packet */
/* RX path for ACL data: strip the ACL header, look up the connection by
 * handle and pass the skb up to L2CAP. The skb is freed here only when
 * no matching connection exists; otherwise L2CAP takes ownership.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both handle and PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5278
5279/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005280static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281{
5282 struct hci_sco_hdr *hdr = (void *) skb->data;
5283 struct hci_conn *conn;
5284 __u16 handle;
5285
5286 skb_pull(skb, HCI_SCO_HDR_SIZE);
5287
5288 handle = __le16_to_cpu(hdr->handle);
5289
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005290 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291
5292 hdev->stat.sco_rx++;
5293
5294 hci_dev_lock(hdev);
5295 conn = hci_conn_hash_lookup_handle(hdev, handle);
5296 hci_dev_unlock(hdev);
5297
5298 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005300 sco_recv_scodata(conn, skb);
5301 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005303 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005304 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 }
5306
5307 kfree_skb(skb);
5308}
5309
Johan Hedberg9238f362013-03-05 20:37:48 +02005310static bool hci_req_is_complete(struct hci_dev *hdev)
5311{
5312 struct sk_buff *skb;
5313
5314 skb = skb_peek(&hdev->cmd_q);
5315 if (!skb)
5316 return true;
5317
5318 return bt_cb(skb)->req.start;
5319}
5320
Johan Hedberg42c6b122013-03-05 20:37:49 +02005321static void hci_resend_last(struct hci_dev *hdev)
5322{
5323 struct hci_command_hdr *sent;
5324 struct sk_buff *skb;
5325 u16 opcode;
5326
5327 if (!hdev->sent_cmd)
5328 return;
5329
5330 sent = (void *) hdev->sent_cmd->data;
5331 opcode = __le16_to_cpu(sent->opcode);
5332 if (opcode == HCI_OP_RESET)
5333 return;
5334
5335 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5336 if (!skb)
5337 return;
5338
5339 skb_queue_head(&hdev->cmd_q, skb);
5340 queue_work(hdev->workqueue, &hdev->cmd_work);
5341}
5342
/* Handle completion of HCI command @opcode with @status and, if this
 * finishes a request, run the request's completion callback exactly
 * once. On a non-zero status the remaining queued commands of the
 * request are flushed.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * Stop at the first command that starts a new request; it is
	 * put back at the head of the queue. The lock is taken with
	 * irqsave because cmd_q is also touched from other contexts.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last seen complete callback so the
		 * request owner still gets notified of the failure.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5408
/* RX work: drain hdev->rx_q and dispatch each frame to the monitor,
 * to raw sockets (in promiscuous mode) and to the handler matching its
 * packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* With an exclusive user channel, the frame is only
		 * mirrored above; the stack does not process it.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
5463
/* TX work for HCI commands: transmit the next queued command when the
 * controller still has command credits (cmd_cnt) available.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the command being sent; it is used
		 * for matching against later completion events.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* Arm the command timeout, except while a reset
			 * is in progress.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry
			 * from a fresh work invocation.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005495
5496void hci_req_add_le_scan_disable(struct hci_request *req)
5497{
5498 struct hci_cp_le_set_scan_enable cp;
5499
5500 memset(&cp, 0, sizeof(cp));
5501 cp.enable = LE_SCAN_DISABLE;
5502 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5503}
Andre Guedesa4790db2014-02-26 20:21:47 -03005504
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005505static void add_to_white_list(struct hci_request *req,
5506 struct hci_conn_params *params)
5507{
5508 struct hci_cp_le_add_to_white_list cp;
5509
5510 cp.bdaddr_type = params->addr_type;
5511 bacpy(&cp.bdaddr, &params->addr);
5512
5513 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5514}
5515
5516static u8 update_white_list(struct hci_request *req)
5517{
5518 struct hci_dev *hdev = req->hdev;
5519 struct hci_conn_params *params;
5520 struct bdaddr_list *b;
5521 uint8_t white_list_entries = 0;
5522
5523 /* Go through the current white list programmed into the
5524 * controller one by one and check if that address is still
5525 * in the list of pending connections or list of devices to
5526 * report. If not present in either list, then queue the
5527 * command to remove it from the controller.
5528 */
5529 list_for_each_entry(b, &hdev->le_white_list, list) {
5530 struct hci_cp_le_del_from_white_list cp;
5531
5532 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5533 &b->bdaddr, b->bdaddr_type) ||
5534 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5535 &b->bdaddr, b->bdaddr_type)) {
5536 white_list_entries++;
5537 continue;
5538 }
5539
5540 cp.bdaddr_type = b->bdaddr_type;
5541 bacpy(&cp.bdaddr, &b->bdaddr);
5542
5543 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5544 sizeof(cp), &cp);
5545 }
5546
5547 /* Since all no longer valid white list entries have been
5548 * removed, walk through the list of pending connections
5549 * and ensure that any new device gets programmed into
5550 * the controller.
5551 *
5552 * If the list of the devices is larger than the list of
5553 * available white list entries in the controller, then
5554 * just abort and return filer policy value to not use the
5555 * white list.
5556 */
5557 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5558 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5559 &params->addr, params->addr_type))
5560 continue;
5561
5562 if (white_list_entries >= hdev->le_white_list_size) {
5563 /* Select filter policy to accept all advertising */
5564 return 0x00;
5565 }
5566
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005567 if (hci_find_irk_by_addr(hdev, &params->addr,
5568 params->addr_type)) {
5569 /* White list can not be used with RPAs */
5570 return 0x00;
5571 }
5572
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005573 white_list_entries++;
5574 add_to_white_list(req, params);
5575 }
5576
5577 /* After adding all new pending connections, walk through
5578 * the list of pending reports and also add these to the
5579 * white list if there is still space.
5580 */
5581 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5582 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5583 &params->addr, params->addr_type))
5584 continue;
5585
5586 if (white_list_entries >= hdev->le_white_list_size) {
5587 /* Select filter policy to accept all advertising */
5588 return 0x00;
5589 }
5590
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005591 if (hci_find_irk_by_addr(hdev, &params->addr,
5592 params->addr_type)) {
5593 /* White list can not be used with RPAs */
5594 return 0x00;
5595 }
5596
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005597 white_list_entries++;
5598 add_to_white_list(req, params);
5599 }
5600
5601 /* Select filter policy to use white list */
5602 return 0x01;
5603}
5604
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005605void hci_req_add_le_passive_scan(struct hci_request *req)
5606{
5607 struct hci_cp_le_set_scan_param param_cp;
5608 struct hci_cp_le_set_scan_enable enable_cp;
5609 struct hci_dev *hdev = req->hdev;
5610 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005611 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005612
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005613 /* Set require_privacy to false since no SCAN_REQ are send
5614 * during passive scanning. Not using an unresolvable address
5615 * here is important so that peer devices using direct
5616 * advertising with our address will be correctly reported
5617 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005618 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005619 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005620 return;
5621
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005622 /* Adding or removing entries from the white list must
5623 * happen before enabling scanning. The controller does
5624 * not allow white list modification while scanning.
5625 */
5626 filter_policy = update_white_list(req);
5627
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005628 memset(&param_cp, 0, sizeof(param_cp));
5629 param_cp.type = LE_SCAN_PASSIVE;
5630 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5631 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5632 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005633 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005634 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5635 &param_cp);
5636
5637 memset(&enable_cp, 0, sizeof(enable_cp));
5638 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005639 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005640 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5641 &enable_cp);
5642}
5643
Andre Guedesa4790db2014-02-26 20:21:47 -03005644static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5645{
5646 if (status)
5647 BT_DBG("HCI request failed to update background scanning: "
5648 "status 0x%2.2x", status);
5649}
5650
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Bail out while the controller is down, still initializing or
	 * setting up, auto-powering off, or being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
Johan Hedberg432df052014-08-01 11:13:31 +03005732
Johan Hedberg22f433d2014-08-01 11:13:32 +03005733static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5734{
5735 struct bdaddr_list *b;
5736
5737 list_for_each_entry(b, &hdev->whitelist, list) {
5738 struct hci_conn *conn;
5739
5740 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5741 if (!conn)
5742 return true;
5743
5744 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5745 return true;
5746 }
5747
5748 return false;
5749}
5750
Johan Hedberg432df052014-08-01 11:13:31 +03005751void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5752{
5753 u8 scan;
5754
5755 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5756 return;
5757
5758 if (!hdev_is_powered(hdev))
5759 return;
5760
5761 if (mgmt_powering_down(hdev))
5762 return;
5763
5764 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005765 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005766 scan = SCAN_PAGE;
5767 else
5768 scan = SCAN_DISABLED;
5769
5770 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5771 return;
5772
5773 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5774 scan |= SCAN_INQUIRY;
5775
5776 if (req)
5777 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5778 else
5779 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5780}