/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

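/* Each per-adapter debugfs file below is created under the adapter's
 * directory, typically /sys/kernel/debug/bluetooth/hciN (the exact path
 * depends on where debugfs is mounted). As an illustrative example, the
 * dut_mode toggle that follows could be exercised from userspace with
 * something like:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */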
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

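/* The link key list (like the identity resolving key and long term key
 * lists further below) is RCU-managed, so this reader walks it with
 * list_for_each_entry_rcu() under rcu_read_lock() instead of taking
 * hci_dev_lock().
 */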
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

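/* DEFINE_SIMPLE_ATTRIBUTE(fops, get, set, fmt) from <linux/fs.h> generates
 * the file_operations for a single-value attribute, printing the value with
 * fmt on read and parsing user input on write. A NULL set callback, as used
 * for voice_setting below, yields a read-only attribute.
 */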
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_lesc_support_fops = {
	.open		= simple_open,
	.read		= force_lesc_support_read,
	.write		= force_lesc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

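/* The helpers below implement a small state machine around
 * hdev->req_status: a synchronous request marks itself HCI_REQ_PEND and
 * sleeps on req_wait_q, and is woken once the status moves to
 * HCI_REQ_DONE (result in req_result) or HCI_REQ_CANCELED (errno in
 * req_result).
 */
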
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

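/* Illustrative use of the synchronous command helpers, mirroring
 * dut_mode_write() above: send a single command while holding the request
 * lock, wait for its completion event, and inspect the returned
 * parameters (the first byte is the HCI status):
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */
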
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

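/* Request-builder functions like hci_reset_req() below are meant to be
 * passed to hci_req_sync()/__hci_req_sync(); a sketch of a typical call
 * (assuming the usual init timeout) would be:
 *
 *	err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */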
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

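/* Pick the best inquiry mode the controller can handle. The return values
 * follow the Write Inquiry Mode command: 0x02 for inquiry results with
 * RSSI or extended inquiry results, 0x01 for inquiry results with RSSI,
 * and 0x00 for standard inquiry results. The manufacturer/revision special
 * cases below appear to cover controllers that handle RSSI results without
 * advertising the corresponding feature bits.
 */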
1440static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1441{
1442 if (lmp_ext_inq_capable(hdev))
1443 return 0x02;
1444
1445 if (lmp_inq_rssi_capable(hdev))
1446 return 0x01;
1447
1448 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1449 hdev->lmp_subver == 0x0757)
1450 return 0x01;
1451
1452 if (hdev->manufacturer == 15) {
1453 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1454 return 0x01;
1455 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1456 return 0x01;
1457 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1458 return 0x01;
1459 }
1460
1461 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1462 hdev->lmp_subver == 0x1805)
1463 return 0x01;
1464
1465 return 0x00;
1466}
1467
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

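/* The HCI Set Event Mask parameter is a 64-bit little-endian bitmask;
 * events[n] below is byte n of that mask, so e.g. events[0] |= 0x10 sets
 * bit 4, Disconnection Complete, in the Core specification's numbering.
 */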
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001554static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556 struct hci_dev *hdev = req->hdev;
1557
Johan Hedberg2177bab2013-03-05 20:37:43 +02001558 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001560 else
1561 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001562
1563 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001564 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001565
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001566 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1567 * local supported commands HCI command.
1568 */
1569 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571
1572 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001573		/* When SSP is available, the host features page
1574		 * should be available as well. However, some
1575		 * controllers list max_page as 0 as long as SSP
1576		 * has not been enabled. To achieve proper debugging
1577		 * output, force max_page to a minimum of 1.
1578 */
1579 hdev->max_page = 0x01;
1580
Johan Hedberg2177bab2013-03-05 20:37:43 +02001581 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1582 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001583 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1584 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585 } else {
1586 struct hci_cp_write_eir cp;
1587
1588 memset(hdev->eir, 0, sizeof(hdev->eir));
1589 memset(&cp, 0, sizeof(cp));
1590
Johan Hedberg42c6b122013-03-05 20:37:49 +02001591 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001592 }
1593 }
1594
1595 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001596 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597
1598 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001599 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001600
1601 if (lmp_ext_feat_capable(hdev)) {
1602 struct hci_cp_read_local_ext_features cp;
1603
1604 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001605 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1606 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001607 }
1608
1609 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1610 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001611 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1612 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001613 }
1614}
1615
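/* Build the default link policy from the link manager features the
 * controller reports (role switch, hold, sniff, park) and program it
 * with Write Default Link Policy Settings.
 */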
Johan Hedberg42c6b122013-03-05 20:37:49 +02001616static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001617{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001618 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001619 struct hci_cp_write_def_link_policy cp;
1620 u16 link_policy = 0;
1621
1622 if (lmp_rswitch_capable(hdev))
1623 link_policy |= HCI_LP_RSWITCH;
1624 if (lmp_hold_capable(hdev))
1625 link_policy |= HCI_LP_HOLD;
1626 if (lmp_sniff_capable(hdev))
1627 link_policy |= HCI_LP_SNIFF;
1628 if (lmp_park_capable(hdev))
1629 link_policy |= HCI_LP_PARK;
1630
1631 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001632 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633}
1634
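/* Mirror the HCI_LE_ENABLED host setting into the controller's LE
 * Supported (Host) feature bit. The command is only sent when the
 * desired value differs from what the controller currently reports.
 */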
Johan Hedberg42c6b122013-03-05 20:37:49 +02001635static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001636{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001637 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001638 struct hci_cp_write_le_host_supported cp;
1639
Johan Hedbergc73eee92013-04-19 18:35:21 +03001640 /* LE-only devices do not support explicit enablement */
1641 if (!lmp_bredr_capable(hdev))
1642 return;
1643
Johan Hedberg2177bab2013-03-05 20:37:43 +02001644 memset(&cp, 0, sizeof(cp));
1645
1646 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1647 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001648 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001649 }
1650
1651 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001652 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1653 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001654}
1655
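/* Page 2 of the event mask covers the Connectionless Slave Broadcast
 * and Authenticated Payload Timeout Expired events; a bit is only
 * enabled when the controller reports the backing feature.
 */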
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001656static void hci_set_event_mask_page_2(struct hci_request *req)
1657{
1658 struct hci_dev *hdev = req->hdev;
1659 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1660
1661	/* If the Connectionless Slave Broadcast master role is supported,
1662	 * enable all necessary events for it.
1663 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001664 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001665 events[1] |= 0x40; /* Triggered Clock Capture */
1666 events[1] |= 0x80; /* Synchronization Train Complete */
1667 events[2] |= 0x10; /* Slave Page Response Timeout */
1668 events[2] |= 0x20; /* CSB Channel Map Change */
1669 }
1670
1671	/* If the Connectionless Slave Broadcast slave role is supported,
1672	 * enable all necessary events for it.
1673 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001674 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001675 events[2] |= 0x01; /* Synchronization Train Received */
1676 events[2] |= 0x02; /* CSB Receive */
1677 events[2] |= 0x04; /* CSB Timeout */
1678 events[2] |= 0x08; /* Truncated Page Complete */
1679 }
1680
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001681 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001682 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001683 events[2] |= 0x80;
1684
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001685 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1686}
1687
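/* Third stage of controller initialization: set the event mask,
 * delete stored link keys where that command is really supported,
 * program the default link policy, perform the LE specific setup and
 * read the remaining extended feature pages.
 */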
Johan Hedberg42c6b122013-03-05 20:37:49 +02001688static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001689{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001690 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001691 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001692
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001693 hci_setup_event_mask(req);
1694
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001695	/* Some Broadcom-based Bluetooth controllers do not support the
1696 * Delete Stored Link Key command. They are clearly indicating its
1697 * absence in the bit mask of supported commands.
1698 *
1699	 * Check the supported commands and send the command only if it is
1700	 * marked as supported. If not supported, assume that the controller
1701	 * does not have actual support for stored link keys, which makes
1702	 * this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001703	 *
1704	 * Some controllers indicate that they support deleting stored
1705	 * link keys, but they don't. The quirk lets a driver simply
1706	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001707 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001708 if (hdev->commands[6] & 0x80 &&
1709 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001710 struct hci_cp_delete_stored_link_key cp;
1711
1712 bacpy(&cp.bdaddr, BDADDR_ANY);
1713 cp.delete_all = 0x01;
1714 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715 sizeof(cp), &cp);
1716 }
1717
Johan Hedberg2177bab2013-03-05 20:37:43 +02001718 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001719 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001720
Andre Guedes9193c6e2014-07-01 18:10:09 -03001721 if (lmp_le_capable(hdev)) {
1722 u8 events[8];
1723
1724 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001725 events[0] = 0x0f;
1726
1727 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001729
1730 /* If controller supports the Connection Parameters Request
1731 * Link Layer Procedure, enable the corresponding event.
1732 */
1733 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734 events[0] |= 0x20; /* LE Remote Connection
1735 * Parameter Request
1736 */
1737
Andre Guedes9193c6e2014-07-01 18:10:09 -03001738 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1739 events);
1740
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001741 if (hdev->commands[25] & 0x40) {
1742 /* Read LE Advertising Channel TX Power */
1743 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1744 }
1745
Johan Hedberg42c6b122013-03-05 20:37:49 +02001746 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001747 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001748
1749 /* Read features beyond page 1 if available */
1750 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1751 struct hci_cp_read_local_ext_features cp;
1752
1753 cp.page = p;
1754 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1755 sizeof(cp), &cp);
1756 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001757}
1758
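/* Fourth and final stage of controller initialization: event mask
 * page 2, local codec list, MWS transport configuration,
 * synchronization train parameters and Secure Connections support,
 * each guarded by the corresponding supported-commands or feature bit.
 */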
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001759static void hci_init4_req(struct hci_request *req, unsigned long opt)
1760{
1761 struct hci_dev *hdev = req->hdev;
1762
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001763 /* Set event mask page 2 if the HCI command for it is supported */
1764 if (hdev->commands[22] & 0x04)
1765 hci_set_event_mask_page_2(req);
1766
Marcel Holtmann109e3192014-07-23 19:24:56 +02001767 /* Read local codec list if the HCI command is supported */
1768 if (hdev->commands[29] & 0x20)
1769 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1770
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001771 /* Get MWS transport configuration if the HCI command is supported */
1772 if (hdev->commands[30] & 0x08)
1773 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1774
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001775 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001776 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001777 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001778
1779 /* Enable Secure Connections if supported and configured */
Johan Hedberg710f11c2014-05-26 11:21:22 +03001780 if (bredr_sc_enabled(hdev)) {
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001781 u8 support = 0x01;
1782 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1783 sizeof(support), &support);
1784 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001785}
1786
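/* Run the complete four stage initialization synchronously. AMP
 * controllers stop after the first stage; BR/EDR/LE controllers go
 * through all four and, during the initial setup phase only, get
 * their debugfs entries created.
 */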
Johan Hedberg2177bab2013-03-05 20:37:43 +02001787static int __hci_init(struct hci_dev *hdev)
1788{
1789 int err;
1790
1791 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1792 if (err < 0)
1793 return err;
1794
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001795 /* The Device Under Test (DUT) mode is special and available for
1796 * all controller types. So just create it early on.
1797 */
1798 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1799 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1800 &dut_mode_fops);
1801 }
1802
Johan Hedberg2177bab2013-03-05 20:37:43 +02001803	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1804	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1805 * first stage init.
1806 */
1807 if (hdev->dev_type != HCI_BREDR)
1808 return 0;
1809
1810 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1811 if (err < 0)
1812 return err;
1813
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001814 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1815 if (err < 0)
1816 return err;
1817
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001818 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1819 if (err < 0)
1820 return err;
1821
1822 /* Only create debugfs entries during the initial setup
1823 * phase and not every time the controller gets powered on.
1824 */
1825 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1826 return 0;
1827
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001828 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1829 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001830 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1831 &hdev->manufacturer);
1832 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1833 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001834 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1835 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001836 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1837 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001838 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1839
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001840 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1841 &conn_info_min_age_fops);
1842 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1843 &conn_info_max_age_fops);
1844
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001845 if (lmp_bredr_capable(hdev)) {
1846 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1847 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001848 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1849 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001850 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1851 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001852 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1853 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001854 }
1855
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001856 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001857 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1858 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001859 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1860 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001861 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1862 hdev, &sc_only_mode_fops);
Johan Hedberg858cdc72014-10-16 10:45:31 +02001863 if (lmp_le_capable(hdev))
1864 debugfs_create_file("force_lesc_support", 0644,
1865 hdev->debugfs, hdev,
1866 &force_lesc_support_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001867 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001868
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001869 if (lmp_sniff_capable(hdev)) {
1870 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1871 hdev, &idle_timeout_fops);
1872 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1873 hdev, &sniff_min_interval_fops);
1874 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1875 hdev, &sniff_max_interval_fops);
1876 }
1877
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001878 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001879 debugfs_create_file("identity", 0400, hdev->debugfs,
1880 hdev, &identity_fops);
1881 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1882 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001883 debugfs_create_file("random_address", 0444, hdev->debugfs,
1884 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001885 debugfs_create_file("static_address", 0444, hdev->debugfs,
1886 hdev, &static_address_fops);
1887
1888 /* For controllers with a public address, provide a debug
1889 * option to force the usage of the configured static
1890 * address. By default the public address is used.
1891 */
1892 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1893 debugfs_create_file("force_static_address", 0644,
1894 hdev->debugfs, hdev,
1895 &force_static_address_fops);
1896
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001897 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1898 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001899 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1900 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001901 debugfs_create_file("identity_resolving_keys", 0400,
1902 hdev->debugfs, hdev,
1903 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001904 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1905 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001906 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1907 hdev, &conn_min_interval_fops);
1908 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1909 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001910 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1911 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001912 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1913 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001914 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1915 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001916 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1917 hdev, &adv_min_interval_fops);
1918 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1919 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001920 debugfs_create_u16("discov_interleaved_timeout", 0644,
1921 hdev->debugfs,
1922 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001923
Johan Hedberg711eafe2014-08-08 09:32:52 +03001924 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001925 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001926
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001927 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001928}
1929
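/* Minimal init used for unconfigured controllers: an optional reset,
 * Read Local Version, and Read BD Address in case the driver provides
 * a set_bdaddr callback.
 */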
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001930static void hci_init0_req(struct hci_request *req, unsigned long opt)
1931{
1932 struct hci_dev *hdev = req->hdev;
1933
1934 BT_DBG("%s %ld", hdev->name, opt);
1935
1936 /* Reset */
1937 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1938 hci_reset_req(req, 0);
1939
1940 /* Read Local Version */
1941 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1942
1943 /* Read BD Address */
1944 if (hdev->set_bdaddr)
1945 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1946}
1947
1948static int __hci_unconf_init(struct hci_dev *hdev)
1949{
1950 int err;
1951
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001952 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1953 return 0;
1954
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001955 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1956 if (err < 0)
1957 return err;
1958
1959 return 0;
1960}
1961
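/* The next few helpers are one-shot request builders used by the
 * HCISETSCAN, HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL ioctls;
 * each queues a single HCI command carrying the value passed in opt.
 */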
Johan Hedberg42c6b122013-03-05 20:37:49 +02001962static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
1964 __u8 scan = opt;
1965
Johan Hedberg42c6b122013-03-05 20:37:49 +02001966 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001969 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970}
1971
Johan Hedberg42c6b122013-03-05 20:37:49 +02001972static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
1974 __u8 auth = opt;
1975
Johan Hedberg42c6b122013-03-05 20:37:49 +02001976 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001979 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980}
1981
Johan Hedberg42c6b122013-03-05 20:37:49 +02001982static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983{
1984 __u8 encrypt = opt;
1985
Johan Hedberg42c6b122013-03-05 20:37:49 +02001986 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001988 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001989 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990}
1991
Johan Hedberg42c6b122013-03-05 20:37:49 +02001992static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001993{
1994 __le16 policy = cpu_to_le16(opt);
1995
Johan Hedberg42c6b122013-03-05 20:37:49 +02001996 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001997
1998 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001999 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002000}
2001
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002002/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 * Device is held on return. */
2004struct hci_dev *hci_dev_get(int index)
2005{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002006 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
2008 BT_DBG("%d", index);
2009
2010 if (index < 0)
2011 return NULL;
2012
2013 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002014 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 if (d->id == index) {
2016 hdev = hci_dev_hold(d);
2017 break;
2018 }
2019 }
2020 read_unlock(&hci_dev_list_lock);
2021 return hdev;
2022}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
2024/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002025
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002026bool hci_discovery_active(struct hci_dev *hdev)
2027{
2028 struct discovery_state *discov = &hdev->discovery;
2029
Andre Guedes6fbe1952012-02-03 17:47:58 -03002030 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002031 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002032 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002033 return true;
2034
Andre Guedes6fbe1952012-02-03 17:47:58 -03002035 default:
2036 return false;
2037 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002038}
2039
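/* Advance the discovery state machine and emit the matching mgmt
 * Discovering events; entering DISCOVERY_STOPPED also re-evaluates
 * background scanning and clears any service discovery filters.
 */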
Johan Hedbergff9ef572012-01-04 14:23:45 +02002040void hci_discovery_set_state(struct hci_dev *hdev, int state)
2041{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002042 int old_state = hdev->discovery.state;
2043
Johan Hedbergff9ef572012-01-04 14:23:45 +02002044 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2045
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002046 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002047 return;
2048
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002049 hdev->discovery.state = state;
2050
Johan Hedbergff9ef572012-01-04 14:23:45 +02002051 switch (state) {
2052 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002053 hci_update_background_scan(hdev);
2054
Jakub Pawlowski37eab042014-12-05 10:55:55 +01002055 /* Reset RSSI and UUID filters to ensure Start Discovery
2056 * and Start Service Discovery operate properly no matter
2057 * which one started the previous discovery.
2058 *
2059 * While the Start Discovery and Start Service Discovery
2060 * operations will set proper values for RSSI and UUID
2061 * count, it is important to actually free the allocated
2062 * list of UUIDs here.
2063 */
Marcel Holtmann02563252014-12-05 11:45:21 +01002064 hci_discovery_filter_clear(hdev);
Jakub Pawlowski37eab042014-12-05 10:55:55 +01002065
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002066 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002067 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002068 break;
2069 case DISCOVERY_STARTING:
2070 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002071 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002072 mgmt_discovering(hdev, 1);
2073 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002074 case DISCOVERY_RESOLVING:
2075 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002076 case DISCOVERY_STOPPING:
2077 break;
2078 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002079}
2080
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002081void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082{
Johan Hedberg30883512012-01-04 14:16:21 +02002083 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002084 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
Johan Hedberg561aafb2012-01-04 13:31:59 +02002086 list_for_each_entry_safe(p, n, &cache->all, all) {
2087 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002088 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002090
2091 INIT_LIST_HEAD(&cache->unknown);
2092 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093}
2094
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002095struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2096 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097{
Johan Hedberg30883512012-01-04 14:16:21 +02002098 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 struct inquiry_entry *e;
2100
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002101 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
Johan Hedberg561aafb2012-01-04 13:31:59 +02002103 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002105 return e;
2106 }
2107
2108 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109}
2110
Johan Hedberg561aafb2012-01-04 13:31:59 +02002111struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002112 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002113{
Johan Hedberg30883512012-01-04 14:16:21 +02002114 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002115 struct inquiry_entry *e;
2116
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002117 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002118
2119 list_for_each_entry(e, &cache->unknown, list) {
2120 if (!bacmp(&e->data.bdaddr, bdaddr))
2121 return e;
2122 }
2123
2124 return NULL;
2125}
2126
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002127struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002128 bdaddr_t *bdaddr,
2129 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002130{
2131 struct discovery_state *cache = &hdev->discovery;
2132 struct inquiry_entry *e;
2133
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002134 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002135
2136 list_for_each_entry(e, &cache->resolve, list) {
2137 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2138 return e;
2139 if (!bacmp(&e->data.bdaddr, bdaddr))
2140 return e;
2141 }
2142
2143 return NULL;
2144}
2145
Johan Hedberga3d4e202012-01-09 00:53:02 +02002146void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002147 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002148{
2149 struct discovery_state *cache = &hdev->discovery;
2150 struct list_head *pos = &cache->resolve;
2151 struct inquiry_entry *p;
2152
2153 list_del(&ie->list);
2154
2155 list_for_each_entry(p, &cache->resolve, list) {
2156 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002157 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002158 break;
2159 pos = &p->list;
2160 }
2161
2162 list_add(&ie->list, pos);
2163}
2164
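/* Add or refresh an inquiry cache entry and return the MGMT_DEV_FOUND
 * flags describing it: legacy pairing when SSP is not supported by the
 * remote device and confirm-name when its name is still unknown.
 */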
Marcel Holtmannaf589252014-07-01 14:11:20 +02002165u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2166 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167{
Johan Hedberg30883512012-01-04 14:16:21 +02002168 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002169 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002170 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002172 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
Johan Hedberg6928a922014-10-26 20:46:09 +01002174 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01002175
Marcel Holtmannaf589252014-07-01 14:11:20 +02002176 if (!data->ssp_mode)
2177 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002178
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002179 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002180 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002181 if (!ie->data.ssp_mode)
2182 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002183
Johan Hedberga3d4e202012-01-09 00:53:02 +02002184 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002185 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002186 ie->data.rssi = data->rssi;
2187 hci_inquiry_cache_update_resolve(hdev, ie);
2188 }
2189
Johan Hedberg561aafb2012-01-04 13:31:59 +02002190 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002191 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002192
Johan Hedberg561aafb2012-01-04 13:31:59 +02002193 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002194 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002195 if (!ie) {
2196 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2197 goto done;
2198 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002199
2200 list_add(&ie->all, &cache->all);
2201
2202 if (name_known) {
2203 ie->name_state = NAME_KNOWN;
2204 } else {
2205 ie->name_state = NAME_NOT_KNOWN;
2206 list_add(&ie->list, &cache->unknown);
2207 }
2208
2209update:
2210 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002211 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002212 ie->name_state = NAME_KNOWN;
2213 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 }
2215
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002216 memcpy(&ie->data, data, sizeof(*data));
2217 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002219
2220 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002221 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002222
Marcel Holtmannaf589252014-07-01 14:11:20 +02002223done:
2224 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225}
2226
2227static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2228{
Johan Hedberg30883512012-01-04 14:16:21 +02002229 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 struct inquiry_info *info = (struct inquiry_info *) buf;
2231 struct inquiry_entry *e;
2232 int copied = 0;
2233
Johan Hedberg561aafb2012-01-04 13:31:59 +02002234 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002236
2237 if (copied >= num)
2238 break;
2239
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 bacpy(&info->bdaddr, &data->bdaddr);
2241 info->pscan_rep_mode = data->pscan_rep_mode;
2242 info->pscan_period_mode = data->pscan_period_mode;
2243 info->pscan_mode = data->pscan_mode;
2244 memcpy(info->dev_class, data->dev_class, 3);
2245 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002248 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 }
2250
2251 BT_DBG("cache %p, copied %d", cache, copied);
2252 return copied;
2253}
2254
Johan Hedberg42c6b122013-03-05 20:37:49 +02002255static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256{
2257 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002258 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 struct hci_cp_inquiry cp;
2260
2261 BT_DBG("%s", hdev->name);
2262
2263 if (test_bit(HCI_INQUIRY, &hdev->flags))
2264 return;
2265
2266 /* Start Inquiry */
2267 memcpy(&cp.lap, &ir->lap, 3);
2268 cp.length = ir->length;
2269 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002270 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271}
2272
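/* HCIINQUIRY ioctl handler: validate the device state, flush a stale
 * cache if needed, run the inquiry synchronously and copy up to
 * max_rsp cached results back to user space.
 */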
2273int hci_inquiry(void __user *arg)
2274{
2275 __u8 __user *ptr = arg;
2276 struct hci_inquiry_req ir;
2277 struct hci_dev *hdev;
2278 int err = 0, do_inquiry = 0, max_rsp;
2279 long timeo;
2280 __u8 *buf;
2281
2282 if (copy_from_user(&ir, ptr, sizeof(ir)))
2283 return -EFAULT;
2284
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002285 hdev = hci_dev_get(ir.dev_id);
2286 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 return -ENODEV;
2288
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002289 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2290 err = -EBUSY;
2291 goto done;
2292 }
2293
Marcel Holtmann4a964402014-07-02 19:10:33 +02002294 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002295 err = -EOPNOTSUPP;
2296 goto done;
2297 }
2298
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002299 if (hdev->dev_type != HCI_BREDR) {
2300 err = -EOPNOTSUPP;
2301 goto done;
2302 }
2303
Johan Hedberg56f87902013-10-02 13:43:13 +03002304 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2305 err = -EOPNOTSUPP;
2306 goto done;
2307 }
2308
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002309 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002310 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002311 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002312 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 do_inquiry = 1;
2314 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002315 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
Marcel Holtmann04837f62006-07-03 10:02:33 +02002317 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002318
2319 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002320 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2321 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002322 if (err < 0)
2323 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002324
2325 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2326 * cleared). If it is interrupted by a signal, return -EINTR.
2327 */
NeilBrown74316202014-07-07 15:16:04 +10002328 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002329 TASK_INTERRUPTIBLE))
2330 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002331 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002333	/* For an unlimited number of responses, use a buffer with
2334	 * 255 entries.
2335 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2337
2338	/* inquiry_cache_dump() can't sleep. Therefore, allocate a temporary
2339	 * buffer and then copy it to user space.
2340 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002341 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002342 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 err = -ENOMEM;
2344 goto done;
2345 }
2346
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002347 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002349 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
2351 BT_DBG("num_rsp %d", ir.num_rsp);
2352
2353 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2354 ptr += sizeof(ir);
2355 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002356 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002358 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 err = -EFAULT;
2360
2361 kfree(buf);
2362
2363done:
2364 hci_dev_put(hdev);
2365 return err;
2366}
2367
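/* Power on path used by both hci_dev_open() and the power on work
 * item: take the request lock, run the driver open() plus the
 * setup/init sequence, and on failure tear everything down again so
 * the device is left closed.
 */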
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002368static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 int ret = 0;
2371
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 BT_DBG("%s %p", hdev->name, hdev);
2373
2374 hci_req_lock(hdev);
2375
Johan Hovold94324962012-03-15 14:48:41 +01002376 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2377 ret = -ENODEV;
2378 goto done;
2379 }
2380
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002381 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2382 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002383 /* Check for rfkill but allow the HCI setup stage to
2384 * proceed (which in itself doesn't cause any RF activity).
2385 */
2386 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2387 ret = -ERFKILL;
2388 goto done;
2389 }
2390
2391		/* Check for a valid public address or a configured static
2392		 * random address, but let the HCI setup proceed to
2393 * be able to determine if there is a public address
2394 * or not.
2395 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002396 * In case of user channel usage, it is not important
2397 * if a public address or static random address is
2398 * available.
2399 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002400 * This check is only valid for BR/EDR controllers
2401 * since AMP controllers do not have an address.
2402 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002403 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2404 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002405 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2406 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2407 ret = -EADDRNOTAVAIL;
2408 goto done;
2409 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002410 }
2411
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 if (test_bit(HCI_UP, &hdev->flags)) {
2413 ret = -EALREADY;
2414 goto done;
2415 }
2416
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 if (hdev->open(hdev)) {
2418 ret = -EIO;
2419 goto done;
2420 }
2421
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002422 atomic_set(&hdev->cmd_cnt, 1);
2423 set_bit(HCI_INIT, &hdev->flags);
2424
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002425 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2426 if (hdev->setup)
2427 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002428
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002429 /* The transport driver can set these quirks before
2430 * creating the HCI device or in its setup callback.
2431 *
2432 * In case any of them is set, the controller has to
2433 * start up as unconfigured.
2434 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002435 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2436 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002437 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002438
2439 /* For an unconfigured controller it is required to
2440 * read at least the version information provided by
2441 * the Read Local Version Information command.
2442 *
2443 * If the set_bdaddr driver callback is provided, then
2444 * also the original Bluetooth public device address
2445 * will be read using the Read BD Address command.
2446 */
2447 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2448 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002449 }
2450
Marcel Holtmann9713c172014-07-06 12:11:15 +02002451 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2452 /* If public address change is configured, ensure that
2453 * the address gets programmed. If the driver does not
2454 * support changing the public address, fail the power
2455 * on procedure.
2456 */
2457 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2458 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002459 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2460 else
2461 ret = -EADDRNOTAVAIL;
2462 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002463
2464 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002465 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002466 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002467 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 }
2469
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002470 clear_bit(HCI_INIT, &hdev->flags);
2471
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 if (!ret) {
2473 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002474 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 set_bit(HCI_UP, &hdev->flags);
2476 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002477 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002478 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002479 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002480 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002481 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002482 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002483 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002484 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002485 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002486 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002488 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002489 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002490 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491
2492 skb_queue_purge(&hdev->cmd_q);
2493 skb_queue_purge(&hdev->rx_q);
2494
2495 if (hdev->flush)
2496 hdev->flush(hdev);
2497
2498 if (hdev->sent_cmd) {
2499 kfree_skb(hdev->sent_cmd);
2500 hdev->sent_cmd = NULL;
2501 }
2502
2503 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002504 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 }
2506
2507done:
2508 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 return ret;
2510}
2511
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002512/* ---- HCI ioctl helpers ---- */
2513
2514int hci_dev_open(__u16 dev)
2515{
2516 struct hci_dev *hdev;
2517 int err;
2518
2519 hdev = hci_dev_get(dev);
2520 if (!hdev)
2521 return -ENODEV;
2522
Marcel Holtmann4a964402014-07-02 19:10:33 +02002523 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002524 * up as user channel. Trying to bring them up as normal devices
2525	 * will result in a failure. Only user channel operation is
2526 * possible.
2527 *
2528 * When this function is called for a user channel, the flag
2529 * HCI_USER_CHANNEL will be set first before attempting to
2530 * open the device.
2531 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002532 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002533 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2534 err = -EOPNOTSUPP;
2535 goto done;
2536 }
2537
Johan Hedberge1d08f42013-10-01 22:44:50 +03002538 /* We need to ensure that no other power on/off work is pending
2539 * before proceeding to call hci_dev_do_open. This is
2540 * particularly important if the setup procedure has not yet
2541 * completed.
2542 */
2543 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2544 cancel_delayed_work(&hdev->power_off);
2545
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002546 /* After this call it is guaranteed that the setup procedure
2547 * has finished. This means that error conditions like RFKILL
2548 * or no valid public or static random address apply.
2549 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002550 flush_workqueue(hdev->req_workqueue);
2551
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002552	/* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002553	 * are brought up via the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002554 * so that pairing works for them. Once the management interface
2555 * is in use this bit will be cleared again and userspace has
2556 * to explicitly enable it.
2557 */
2558 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2559 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002560 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002561
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002562 err = hci_dev_do_open(hdev);
2563
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002564done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002565 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002566 return err;
2567}
2568
Johan Hedbergd7347f32014-07-04 12:37:23 +03002569/* This function requires the caller holds hdev->lock */
2570static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2571{
2572 struct hci_conn_params *p;
2573
Johan Hedbergf161dd42014-08-15 21:06:54 +03002574 list_for_each_entry(p, &hdev->le_conn_params, list) {
2575 if (p->conn) {
2576 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002577 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002578 p->conn = NULL;
2579 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002580 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002581 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002582
2583 BT_DBG("All LE pending actions cleared");
2584}
2585
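/* Power off path: cancel pending work, flush the RX/TX/cmd work
 * items, drop all queued frames and connections, optionally reset the
 * controller and finally call the driver close() while clearing every
 * non-persistent flag.
 */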
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586static int hci_dev_do_close(struct hci_dev *hdev)
2587{
2588 BT_DBG("%s %p", hdev->name, hdev);
2589
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002590 cancel_delayed_work(&hdev->power_off);
2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 hci_req_cancel(hdev, ENODEV);
2593 hci_req_lock(hdev);
2594
2595 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002596 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 hci_req_unlock(hdev);
2598 return 0;
2599 }
2600
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002601 /* Flush RX and TX works */
2602 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002603 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002605 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002606 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002607 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002608 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002609 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002610 }
2611
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002612 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002613 cancel_delayed_work(&hdev->service_cache);
2614
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002615 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002616
2617 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2618 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002619
Johan Hedberg76727c02014-11-18 09:00:14 +02002620 /* Avoid potential lockdep warnings from the *_flush() calls by
2621 * ensuring the workqueue is empty up front.
2622 */
2623 drain_workqueue(hdev->workqueue);
2624
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002625 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002626 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002627 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002628 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002629 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630
2631 hci_notify(hdev, HCI_DEV_DOWN);
2632
2633 if (hdev->flush)
2634 hdev->flush(hdev);
2635
2636 /* Reset device */
2637 skb_queue_purge(&hdev->cmd_q);
2638 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002639 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2640 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002641 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002643 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 clear_bit(HCI_INIT, &hdev->flags);
2645 }
2646
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002647 /* flush cmd work */
2648 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649
2650 /* Drop queues */
2651 skb_queue_purge(&hdev->rx_q);
2652 skb_queue_purge(&hdev->cmd_q);
2653 skb_queue_purge(&hdev->raw_q);
2654
2655 /* Drop last sent command */
2656 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002657 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 kfree_skb(hdev->sent_cmd);
2659 hdev->sent_cmd = NULL;
2660 }
2661
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002662 kfree_skb(hdev->recv_evt);
2663 hdev->recv_evt = NULL;
2664
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 /* After this point our queues are empty
2666 * and no tasks are scheduled. */
2667 hdev->close(hdev);
2668
Johan Hedberg35b973c2013-03-15 17:06:59 -05002669 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002670 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002671 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2672
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002673 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2674 if (hdev->dev_type == HCI_BREDR) {
2675 hci_dev_lock(hdev);
2676 mgmt_powered(hdev, 0);
2677 hci_dev_unlock(hdev);
2678 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002679 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002680
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002681 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002682 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002683
Johan Hedberge59fda82012-02-22 18:11:53 +02002684 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002685 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002686 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002687
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 hci_req_unlock(hdev);
2689
2690 hci_dev_put(hdev);
2691 return 0;
2692}
2693
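/* HCIDEVDOWN ioctl handler: cancel a pending deferred power off and
 * power the device down via hci_dev_do_close().
 */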
2694int hci_dev_close(__u16 dev)
2695{
2696 struct hci_dev *hdev;
2697 int err;
2698
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002699 hdev = hci_dev_get(dev);
2700 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002702
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002703 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2704 err = -EBUSY;
2705 goto done;
2706 }
2707
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002708 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2709 cancel_delayed_work(&hdev->power_off);
2710
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002712
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002713done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 hci_dev_put(hdev);
2715 return err;
2716}
2717
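/* HCIDEVRESET ioctl handler: drop all queued frames and connections
 * and issue HCI Reset without bringing the interface down.
 */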
2718int hci_dev_reset(__u16 dev)
2719{
2720 struct hci_dev *hdev;
2721 int ret = 0;
2722
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002723 hdev = hci_dev_get(dev);
2724 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 return -ENODEV;
2726
2727 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728
Marcel Holtmann808a0492013-08-26 20:57:58 -07002729 if (!test_bit(HCI_UP, &hdev->flags)) {
2730 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002732 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002734 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2735 ret = -EBUSY;
2736 goto done;
2737 }
2738
Marcel Holtmann4a964402014-07-02 19:10:33 +02002739 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002740 ret = -EOPNOTSUPP;
2741 goto done;
2742 }
2743
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 /* Drop queues */
2745 skb_queue_purge(&hdev->rx_q);
2746 skb_queue_purge(&hdev->cmd_q);
2747
Johan Hedberg76727c02014-11-18 09:00:14 +02002748 /* Avoid potential lockdep warnings from the *_flush() calls by
2749 * ensuring the workqueue is empty up front.
2750 */
2751 drain_workqueue(hdev->workqueue);
2752
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002753 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002754 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002756 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
2758 if (hdev->flush)
2759 hdev->flush(hdev);
2760
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002761 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002762 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002764 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
2766done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 hci_req_unlock(hdev);
2768 hci_dev_put(hdev);
2769 return ret;
2770}
2771
2772int hci_dev_reset_stat(__u16 dev)
2773{
2774 struct hci_dev *hdev;
2775 int ret = 0;
2776
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002777 hdev = hci_dev_get(dev);
2778 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 return -ENODEV;
2780
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002781 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2782 ret = -EBUSY;
2783 goto done;
2784 }
2785
Marcel Holtmann4a964402014-07-02 19:10:33 +02002786 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002787 ret = -EOPNOTSUPP;
2788 goto done;
2789 }
2790
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2792
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002793done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 return ret;
2796}
2797
Johan Hedberg123abc02014-07-10 12:09:07 +03002798static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2799{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002800 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002801
2802 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2803
2804	if (scan & SCAN_PAGE)
2805 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2806 &hdev->dev_flags);
2807 else
2808 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2809 &hdev->dev_flags);
2810
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002811	if (scan & SCAN_INQUIRY) {
2812 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2813 &hdev->dev_flags);
2814 } else {
2815 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2816 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2817 &hdev->dev_flags);
2818 }
2819
Johan Hedberg123abc02014-07-10 12:09:07 +03002820 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2821 return;
2822
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002823 if (conn_changed || discov_changed) {
2824 /* In case this was disabled through mgmt */
2825 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2826
2827 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2828 mgmt_update_adv_data(hdev);
2829
Johan Hedberg123abc02014-07-10 12:09:07 +03002830 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002831 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002832}
2833
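/* Illustrative mapping (sketch): a HCISETSCAN request with dev_opt set
 * to (SCAN_PAGE | SCAN_INQUIRY) ends up in hci_update_scan_state() and
 * sets both HCI_CONNECTABLE and HCI_DISCOVERABLE, while SCAN_DISABLED
 * clears them again (limited discoverability is always cleared when
 * inquiry scan goes away).
 */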
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834int hci_dev_cmd(unsigned int cmd, void __user *arg)
2835{
2836 struct hci_dev *hdev;
2837 struct hci_dev_req dr;
2838 int err = 0;
2839
2840 if (copy_from_user(&dr, arg, sizeof(dr)))
2841 return -EFAULT;
2842
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002843 hdev = hci_dev_get(dr.dev_id);
2844 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 return -ENODEV;
2846
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002847 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2848 err = -EBUSY;
2849 goto done;
2850 }
2851
Marcel Holtmann4a964402014-07-02 19:10:33 +02002852 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002853 err = -EOPNOTSUPP;
2854 goto done;
2855 }
2856
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002857 if (hdev->dev_type != HCI_BREDR) {
2858 err = -EOPNOTSUPP;
2859 goto done;
2860 }
2861
Johan Hedberg56f87902013-10-02 13:43:13 +03002862 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2863 err = -EOPNOTSUPP;
2864 goto done;
2865 }
2866
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 switch (cmd) {
2868 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002869 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2870 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 break;
2872
2873 case HCISETENCRYPT:
2874 if (!lmp_encrypt_capable(hdev)) {
2875 err = -EOPNOTSUPP;
2876 break;
2877 }
2878
2879 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2880 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002881 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2882 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 if (err)
2884 break;
2885 }
2886
Johan Hedberg01178cd2013-03-05 20:37:41 +02002887 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2888 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 break;
2890
2891 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002892 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2893 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002894
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002895 /* Ensure that the connectable and discoverable states
2896 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002897 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002898 if (!err)
2899 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 break;
2901
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002902 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002903 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2904 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002905 break;
2906
2907 case HCISETLINKMODE:
2908 hdev->link_mode = ((__u16) dr.dev_opt) &
2909 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2910 break;
2911
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 case HCISETPTYPE:
2913 hdev->pkt_type = (__u16) dr.dev_opt;
2914 break;
2915
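	/* Illustrative note (sketch, assuming a little-endian host): for
	 * the MTU ioctls below, userspace packs both values into the
	 * single dev_opt word, e.g.
	 *
	 *	dr.dev_opt = (1021 << 16) | 8;	// ACL MTU 1021, 8 packets
	 *
	 * so the "+ 1" access picks the MTU half and "+ 0" the packet
	 * count half.
	 */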
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002917 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2918 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 break;
2920
2921 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002922 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2923 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 break;
2925
2926 default:
2927 err = -EINVAL;
2928 break;
2929 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002930
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002931done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 hci_dev_put(hdev);
2933 return err;
2934}
2935
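/* Illustrative userspace counterpart (sketch): the ioctls handled above
 * are issued against an HCI socket, e.g. to enable page and inquiry
 * scan:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(hci_sock_fd, HCISETSCAN, (unsigned long) &dr);
 *
 * hci_sock_fd is assumed to be an open AF_BLUETOOTH/BTPROTO_HCI socket.
 */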
2936int hci_get_dev_list(void __user *arg)
2937{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002938 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939 struct hci_dev_list_req *dl;
2940 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 int n = 0, size, err;
2942 __u16 dev_num;
2943
2944 if (get_user(dev_num, (__u16 __user *) arg))
2945 return -EFAULT;
2946
2947 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2948 return -EINVAL;
2949
2950 size = sizeof(*dl) + dev_num * sizeof(*dr);
2951
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002952 dl = kzalloc(size, GFP_KERNEL);
2953 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 return -ENOMEM;
2955
2956 dr = dl->dev_req;
2957
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002958 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002959 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002960 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002961
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002962		/* When auto-off is configured, the transport is
2963		 * running; in that case still indicate that the
2964		 * device is actually down.
2965		 */
2966 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2967 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002968
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002970 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002971
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 if (++n >= dev_num)
2973 break;
2974 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002975 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976
2977 dl->dev_num = n;
2978 size = sizeof(*dl) + n * sizeof(*dr);
2979
2980 err = copy_to_user(arg, dl, size);
2981 kfree(dl);
2982
2983 return err ? -EFAULT : 0;
2984}
2985
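/* Illustrative userspace counterpart (sketch): HCIGETDEVLIST takes a
 * buffer whose dev_num is preset to its capacity:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) +
 *		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	ioctl(hci_sock_fd, HCIGETDEVLIST, (unsigned long) dl);
 *
 * hci_sock_fd is again an assumed open HCI socket.
 */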
2986int hci_get_dev_info(void __user *arg)
2987{
2988 struct hci_dev *hdev;
2989 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002990 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 int err = 0;
2992
2993 if (copy_from_user(&di, arg, sizeof(di)))
2994 return -EFAULT;
2995
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002996 hdev = hci_dev_get(di.dev_id);
2997 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 return -ENODEV;
2999
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003000	/* When auto-off is configured, the transport is
3001	 * running; in that case still indicate that the
3002	 * device is actually down.
3003	 */
3004 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3005 flags = hdev->flags & ~BIT(HCI_UP);
3006 else
3007 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02003008
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 strcpy(di.name, hdev->name);
3010 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07003011 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003012 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03003014 if (lmp_bredr_capable(hdev)) {
3015 di.acl_mtu = hdev->acl_mtu;
3016 di.acl_pkts = hdev->acl_pkts;
3017 di.sco_mtu = hdev->sco_mtu;
3018 di.sco_pkts = hdev->sco_pkts;
3019 } else {
3020 di.acl_mtu = hdev->le_mtu;
3021 di.acl_pkts = hdev->le_pkts;
3022 di.sco_mtu = 0;
3023 di.sco_pkts = 0;
3024 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025 di.link_policy = hdev->link_policy;
3026 di.link_mode = hdev->link_mode;
3027
3028 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3029 memcpy(&di.features, &hdev->features, sizeof(di.features));
3030
3031 if (copy_to_user(arg, &di, sizeof(di)))
3032 err = -EFAULT;
3033
3034 hci_dev_put(hdev);
3035
3036 return err;
3037}
3038
3039/* ---- Interface to HCI drivers ---- */
3040
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003041static int hci_rfkill_set_block(void *data, bool blocked)
3042{
3043 struct hci_dev *hdev = data;
3044
3045 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3046
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003047 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3048 return -EBUSY;
3049
Johan Hedberg5e130362013-09-13 08:58:17 +03003050 if (blocked) {
3051 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003052 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3053 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003054 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003055 } else {
3056 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003057 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003058
3059 return 0;
3060}
3061
3062static const struct rfkill_ops hci_rfkill_ops = {
3063 .set_block = hci_rfkill_set_block,
3064};
3065
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003066static void hci_power_on(struct work_struct *work)
3067{
3068 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003069 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003070
3071 BT_DBG("%s", hdev->name);
3072
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003073 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003074 if (err < 0) {
3075 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003076 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003077 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003078
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003079	/* During the HCI setup phase, a few error conditions are
3080	 * ignored and need to be checked now. If any of them still
3081	 * apply, it is important to turn the device back off.
3082	 */
3083 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003084 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003085 (hdev->dev_type == HCI_BREDR &&
3086 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3087 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003088 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3089 hci_dev_do_close(hdev);
3090 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003091 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3092 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003093 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003094
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003095 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003096 /* For unconfigured devices, set the HCI_RAW flag
3097 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003098 */
3099 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3100 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003101
3102 /* For fully configured devices, this will send
3103 * the Index Added event. For unconfigured devices,
3104	 * it will send the Unconfigured Index Added event.
3105 *
3106 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3107	 * and no event will be sent.
3108 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003109 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003110 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003111		/* Now that the controller is configured, it is
3112		 * important to clear the HCI_RAW flag.
3113 */
3114 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3115 clear_bit(HCI_RAW, &hdev->flags);
3116
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003117 /* Powering on the controller with HCI_CONFIG set only
3118 * happens with the transition from unconfigured to
3119 * configured. This will send the Index Added event.
3120 */
3121 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003122 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003123}
3124
3125static void hci_power_off(struct work_struct *work)
3126{
Johan Hedberg32435532011-11-07 22:16:04 +02003127 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003128 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003129
3130 BT_DBG("%s", hdev->name);
3131
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003132 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003133}
3134
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003135static void hci_discov_off(struct work_struct *work)
3136{
3137 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003138
3139 hdev = container_of(work, struct hci_dev, discov_off.work);
3140
3141 BT_DBG("%s", hdev->name);
3142
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003143 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003144}
3145
Johan Hedberg35f74982014-02-18 17:14:32 +02003146void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003147{
Johan Hedberg48210022013-01-27 00:31:28 +02003148 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003149
Johan Hedberg48210022013-01-27 00:31:28 +02003150 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3151 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003152 kfree(uuid);
3153 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003154}
3155
Johan Hedberg35f74982014-02-18 17:14:32 +02003156void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003157{
Johan Hedberg0378b592014-11-19 15:22:22 +02003158 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003159
Johan Hedberg0378b592014-11-19 15:22:22 +02003160 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3161 list_del_rcu(&key->list);
3162 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003163 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003164}
3165
Johan Hedberg35f74982014-02-18 17:14:32 +02003166void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003167{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003168 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003169
Johan Hedberg970d0f12014-11-13 14:37:47 +02003170 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3171 list_del_rcu(&k->list);
3172 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003173 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003174}
3175
Johan Hedberg970c4e42014-02-18 10:19:33 +02003176void hci_smp_irks_clear(struct hci_dev *hdev)
3177{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003178 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003179
Johan Hedbergadae20c2014-11-13 14:37:48 +02003180 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3181 list_del_rcu(&k->list);
3182 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003183 }
3184}
3185
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003186struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3187{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003188 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003189
Johan Hedberg0378b592014-11-19 15:22:22 +02003190 rcu_read_lock();
3191 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3192 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3193 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003194 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003195 }
3196 }
3197 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003198
3199 return NULL;
3200}
3201
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303202static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003203 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003204{
3205 /* Legacy key */
3206 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303207 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003208
3209 /* Debug keys are insecure so don't store them persistently */
3210 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303211 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003212
3213 /* Changed combination key and there's no previous one */
3214 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303215 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003216
3217 /* Security mode 3 case */
3218 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303219 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003220
Johan Hedberge3befab2014-06-01 16:33:39 +03003221 /* BR/EDR key derived using SC from an LE link */
3222 if (conn->type == LE_LINK)
3223 return true;
3224
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003225	/* Neither the local nor the remote side had no-bonding as a requirement */
3226 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303227 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003228
3229 /* Local side had dedicated bonding as requirement */
3230 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303231 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003232
3233 /* Remote side had dedicated bonding as requirement */
3234 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303235 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003236
3237 /* If none of the above criteria match, then don't store the key
3238 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303239 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003240}
3241
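/* Worked example (sketch): an unauthenticated combination key
 * (type 0x04) created through dedicated bonding (auth_type 0x02 or
 * 0x03 on either side) is stored persistently, while a debug
 * combination key (0x03) never is, regardless of the bonding
 * requirements.
 */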
Johan Hedberge804d252014-07-16 11:42:28 +03003242static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003243{
Johan Hedberge804d252014-07-16 11:42:28 +03003244 if (type == SMP_LTK)
3245 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003246
Johan Hedberge804d252014-07-16 11:42:28 +03003247 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003248}
3249
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003250struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3251 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003252{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003253 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003254
Johan Hedberg970d0f12014-11-13 14:37:47 +02003255 rcu_read_lock();
3256 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003257 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3258 continue;
3259
Johan Hedberg923e2412014-12-03 12:43:39 +02003260 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003261 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003262 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003263 }
3264 }
3265 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003266
3267 return NULL;
3268}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003269
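/* Illustrative usage (sketch): resolve the LTK for an incoming LE
 * Long Term Key Request event, where the local side acts as slave:
 *
 *	struct smp_ltk *ltk;
 *
 *	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type,
 *			   HCI_ROLE_SLAVE);
 *	if (!ltk)
 *		return; // no key stored: a negative reply is sent instead
 *
 * conn is assumed to be the LE link the event refers to.
 */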
Johan Hedberg970c4e42014-02-18 10:19:33 +02003270struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3271{
3272 struct smp_irk *irk;
3273
Johan Hedbergadae20c2014-11-13 14:37:48 +02003274 rcu_read_lock();
3275 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3276 if (!bacmp(&irk->rpa, rpa)) {
3277 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003278 return irk;
3279 }
3280 }
3281
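	/* No cached RPA matched above; fall back to resolving the RPA
	 * against each stored IRK and cache it on the entry that matches.
	 */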
Johan Hedbergadae20c2014-11-13 14:37:48 +02003282 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3283 if (smp_irk_matches(hdev, irk->val, rpa)) {
3284 bacpy(&irk->rpa, rpa);
3285 rcu_read_unlock();
3286 return irk;
3287 }
3288 }
3289 rcu_read_unlock();
3290
Johan Hedberg970c4e42014-02-18 10:19:33 +02003291 return NULL;
3292}
3293
3294struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3295 u8 addr_type)
3296{
3297 struct smp_irk *irk;
3298
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003299 /* Identity Address must be public or static random */
3300 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3301 return NULL;
3302
Johan Hedbergadae20c2014-11-13 14:37:48 +02003303 rcu_read_lock();
3304 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003305 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003306 bacmp(bdaddr, &irk->bdaddr) == 0) {
3307 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003308 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003309 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003310 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003311 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003312
3313 return NULL;
3314}
3315
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003316struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003317 bdaddr_t *bdaddr, u8 *val, u8 type,
3318 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003319{
3320 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303321 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003322
3323 old_key = hci_find_link_key(hdev, bdaddr);
3324 if (old_key) {
3325 old_key_type = old_key->type;
3326 key = old_key;
3327 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003328 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003329 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003330 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003331 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02003332 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003333 }
3334
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003335 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003336
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003337 /* Some buggy controller combinations generate a changed
3338 * combination key for legacy pairing even when there's no
3339 * previous key */
3340 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003341 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003342 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003343 if (conn)
3344 conn->key_type = type;
3345 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003346
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003347 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003348 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003349 key->pin_len = pin_len;
3350
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003351 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003352 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003353 else
3354 key->type = type;
3355
Johan Hedberg7652ff62014-06-24 13:15:49 +03003356 if (persistent)
3357 *persistent = hci_persistent_key(hdev, conn, type,
3358 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003359
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003360 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003361}
3362
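/* Illustrative usage (sketch): store a key delivered by a Link Key
 * Notification event and tell mgmt whether it should be persisted:
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 *
 * ev, conn and pin_len are assumed to come from the event handler.
 */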
Johan Hedbergca9142b2014-02-19 14:57:44 +02003363struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003364 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003365 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003366{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003367 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003368 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003369
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003370 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003371 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003372 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003373 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003374 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003375 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003376 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003377 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003378 }
3379
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003380 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003381 key->bdaddr_type = addr_type;
3382 memcpy(key->val, tk, sizeof(key->val));
3383 key->authenticated = authenticated;
3384 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003385 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003386 key->enc_size = enc_size;
3387 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003388
Johan Hedbergca9142b2014-02-19 14:57:44 +02003389 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003390}
3391
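/* Illustrative usage (sketch): cache a master LTK received during SMP
 * key distribution (all values are assumed to come from the SMP
 * context):
 *
 *	hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK,
 *		    authenticated, tk, enc_size, ediv, rand);
 */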
Johan Hedbergca9142b2014-02-19 14:57:44 +02003392struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3393 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003394{
3395 struct smp_irk *irk;
3396
3397 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3398 if (!irk) {
3399 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3400 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003401 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003402
3403 bacpy(&irk->bdaddr, bdaddr);
3404 irk->addr_type = addr_type;
3405
Johan Hedbergadae20c2014-11-13 14:37:48 +02003406 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003407 }
3408
3409 memcpy(irk->val, val, 16);
3410 bacpy(&irk->rpa, rpa);
3411
Johan Hedbergca9142b2014-02-19 14:57:44 +02003412 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003413}
3414
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003415int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3416{
3417 struct link_key *key;
3418
3419 key = hci_find_link_key(hdev, bdaddr);
3420 if (!key)
3421 return -ENOENT;
3422
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003423 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003424
Johan Hedberg0378b592014-11-19 15:22:22 +02003425 list_del_rcu(&key->list);
3426 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003427
3428 return 0;
3429}
3430
Johan Hedberge0b2b272014-02-18 17:14:31 +02003431int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003432{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003433 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003434 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003435
Johan Hedberg970d0f12014-11-13 14:37:47 +02003436 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003437 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003438 continue;
3439
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003440 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003441
Johan Hedberg970d0f12014-11-13 14:37:47 +02003442 list_del_rcu(&k->list);
3443 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003444 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003445 }
3446
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003447 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003448}
3449
Johan Hedberga7ec7332014-02-18 17:14:35 +02003450void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3451{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003452 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003453
Johan Hedbergadae20c2014-11-13 14:37:48 +02003454 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003455 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3456 continue;
3457
3458 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3459
Johan Hedbergadae20c2014-11-13 14:37:48 +02003460 list_del_rcu(&k->list);
3461 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003462 }
3463}
3464
Ville Tervo6bd32322011-02-16 16:32:41 +02003465/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003466static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003467{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003468 struct hci_dev *hdev = container_of(work, struct hci_dev,
3469 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003470
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003471 if (hdev->sent_cmd) {
3472 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3473 u16 opcode = __le16_to_cpu(sent->opcode);
3474
3475 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3476 } else {
3477 BT_ERR("%s command tx timeout", hdev->name);
3478 }
3479
Ville Tervo6bd32322011-02-16 16:32:41 +02003480 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003481 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003482}
3483
Szymon Janc2763eda2011-03-22 13:12:22 +01003484struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003485 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003486{
3487 struct oob_data *data;
3488
Johan Hedberg6928a922014-10-26 20:46:09 +01003489 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3490 if (bacmp(bdaddr, &data->bdaddr) != 0)
3491 continue;
3492 if (data->bdaddr_type != bdaddr_type)
3493 continue;
3494 return data;
3495 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003496
3497 return NULL;
3498}
3499
Johan Hedberg6928a922014-10-26 20:46:09 +01003500int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3501 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003502{
3503 struct oob_data *data;
3504
Johan Hedberg6928a922014-10-26 20:46:09 +01003505 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003506 if (!data)
3507 return -ENOENT;
3508
Johan Hedberg6928a922014-10-26 20:46:09 +01003509 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003510
3511 list_del(&data->list);
3512 kfree(data);
3513
3514 return 0;
3515}
3516
Johan Hedberg35f74982014-02-18 17:14:32 +02003517void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003518{
3519 struct oob_data *data, *n;
3520
3521 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3522 list_del(&data->list);
3523 kfree(data);
3524 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003525}
3526
Marcel Holtmann07988722014-01-10 02:07:29 -08003527int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01003528 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003529 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01003530{
3531 struct oob_data *data;
3532
Johan Hedberg6928a922014-10-26 20:46:09 +01003533 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003534 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003535 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003536 if (!data)
3537 return -ENOMEM;
3538
3539 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01003540 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01003541 list_add(&data->list, &hdev->remote_oob_data);
3542 }
3543
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003544 if (hash192 && rand192) {
3545 memcpy(data->hash192, hash192, sizeof(data->hash192));
3546 memcpy(data->rand192, rand192, sizeof(data->rand192));
3547 } else {
3548 memset(data->hash192, 0, sizeof(data->hash192));
3549 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003550 }
3551
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003552 if (hash256 && rand256) {
3553 memcpy(data->hash256, hash256, sizeof(data->hash256));
3554 memcpy(data->rand256, rand256, sizeof(data->rand256));
3555 } else {
3556 memset(data->hash256, 0, sizeof(data->hash256));
3557 memset(data->rand256, 0, sizeof(data->rand256));
3558 }
Marcel Holtmann07988722014-01-10 02:07:29 -08003559
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003560 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003561
3562 return 0;
3563}
3564
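/* Illustrative usage (sketch): store P-192 OOB data received over an
 * out-of-band channel while leaving the P-256 values unset:
 *
 *	hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *				hash192, rand192, NULL, NULL);
 *
 * hash192 and rand192 are assumed to be 16-byte buffers obtained from
 * the OOB exchange.
 */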
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003565struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003566 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003567{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003568 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003569
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003570 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003571 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003572 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003573 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003574
3575 return NULL;
3576}
3577
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003578void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003579{
3580 struct list_head *p, *n;
3581
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003582 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003583 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003584
3585 list_del(p);
3586 kfree(b);
3587 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003588}
3589
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003590int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003591{
3592 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003593
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003594 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003595 return -EBADF;
3596
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003597 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003598 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003599
Johan Hedberg27f70f32014-07-21 10:50:06 +03003600 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003601 if (!entry)
3602 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003603
3604 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003605 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003606
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003607 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003608
3609 return 0;
3610}
3611
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003612int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003613{
3614 struct bdaddr_list *entry;
3615
Johan Hedberg35f74982014-02-18 17:14:32 +02003616 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003617 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003618 return 0;
3619 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003620
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003621 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003622 if (!entry)
3623 return -ENOENT;
3624
3625 list_del(&entry->list);
3626 kfree(entry);
3627
3628 return 0;
3629}
3630
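/* Illustrative usage (sketch): with hdev->lock held, accept incoming
 * connections from a given BR/EDR address by adding it to the white
 * list:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_BREDR);
 *	if (err < 0 && err != -EEXIST)
 *		return err;
 */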
Andre Guedes15819a72014-02-03 13:56:18 -03003631/* This function requires the caller holds hdev->lock */
3632struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3633 bdaddr_t *addr, u8 addr_type)
3634{
3635 struct hci_conn_params *params;
3636
Johan Hedberg738f6182014-07-03 19:33:51 +03003637 /* The conn params list only contains identity addresses */
3638 if (!hci_is_identity_address(addr, addr_type))
3639 return NULL;
3640
Andre Guedes15819a72014-02-03 13:56:18 -03003641 list_for_each_entry(params, &hdev->le_conn_params, list) {
3642 if (bacmp(&params->addr, addr) == 0 &&
3643 params->addr_type == addr_type) {
3644 return params;
3645 }
3646 }
3647
3648 return NULL;
3649}
3650
Andre Guedescef952c2014-02-26 20:21:49 -03003651static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3652{
3653 struct hci_conn *conn;
3654
3655 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3656 if (!conn)
3657 return false;
3658
3659 if (conn->dst_type != type)
3660 return false;
3661
3662 if (conn->state != BT_CONNECTED)
3663 return false;
3664
3665 return true;
3666}
3667
Andre Guedes15819a72014-02-03 13:56:18 -03003668/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003669struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3670 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003671{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003672 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003673
Johan Hedberg738f6182014-07-03 19:33:51 +03003674 /* The list only contains identity addresses */
3675 if (!hci_is_identity_address(addr, addr_type))
3676 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003677
Johan Hedberg501f8822014-07-04 12:37:26 +03003678 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003679 if (bacmp(&param->addr, addr) == 0 &&
3680 param->addr_type == addr_type)
3681 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003682 }
3683
3684 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003685}
3686
3687/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003688struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3689 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003690{
3691 struct hci_conn_params *params;
3692
Johan Hedbergc46245b2014-07-02 17:37:33 +03003693 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003694 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003695
Andre Guedes15819a72014-02-03 13:56:18 -03003696 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003697 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003698 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003699
3700 params = kzalloc(sizeof(*params), GFP_KERNEL);
3701 if (!params) {
3702 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003703 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003704 }
3705
3706 bacpy(&params->addr, addr);
3707 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003708
3709 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003710 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003711
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003712 params->conn_min_interval = hdev->le_conn_min_interval;
3713 params->conn_max_interval = hdev->le_conn_max_interval;
3714 params->conn_latency = hdev->le_conn_latency;
3715 params->supervision_timeout = hdev->le_supv_timeout;
3716 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3717
3718 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3719
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003720 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003721}
3722
3723/* This function requires the caller holds hdev->lock */
3724int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003725 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003726{
3727 struct hci_conn_params *params;
3728
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003729 params = hci_conn_params_add(hdev, addr, addr_type);
3730 if (!params)
3731 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003732
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003733 if (params->auto_connect == auto_connect)
3734 return 0;
3735
Johan Hedberg95305ba2014-07-04 12:37:21 +03003736 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003737
Andre Guedescef952c2014-02-26 20:21:49 -03003738 switch (auto_connect) {
3739 case HCI_AUTO_CONN_DISABLED:
3740 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003741 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003742 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003743 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003744 list_add(&params->action, &hdev->pend_le_reports);
3745 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003746 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003747 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003748 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003749 if (!is_connected(hdev, addr, addr_type)) {
3750 list_add(&params->action, &hdev->pend_le_conns);
3751 hci_update_background_scan(hdev);
3752 }
Andre Guedescef952c2014-02-26 20:21:49 -03003753 break;
3754 }
Andre Guedes15819a72014-02-03 13:56:18 -03003755
Johan Hedberg851efca2014-07-02 22:42:00 +03003756 params->auto_connect = auto_connect;
3757
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003758 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3759 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003760
3761 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003762}
3763
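/* Illustrative usage (sketch): request automatic reconnection to an LE
 * device known by its public identity address:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */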
Johan Hedbergf6c63242014-08-15 21:06:59 +03003764static void hci_conn_params_free(struct hci_conn_params *params)
3765{
3766 if (params->conn) {
3767 hci_conn_drop(params->conn);
3768 hci_conn_put(params->conn);
3769 }
3770
3771 list_del(&params->action);
3772 list_del(&params->list);
3773 kfree(params);
3774}
3775
Andre Guedes15819a72014-02-03 13:56:18 -03003776/* This function requires the caller holds hdev->lock */
3777void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3778{
3779 struct hci_conn_params *params;
3780
3781 params = hci_conn_params_lookup(hdev, addr, addr_type);
3782 if (!params)
3783 return;
3784
Johan Hedbergf6c63242014-08-15 21:06:59 +03003785 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003786
Johan Hedberg95305ba2014-07-04 12:37:21 +03003787 hci_update_background_scan(hdev);
3788
Andre Guedes15819a72014-02-03 13:56:18 -03003789 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3790}
3791
3792/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003793void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003794{
3795 struct hci_conn_params *params, *tmp;
3796
3797 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003798 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3799 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003800 list_del(&params->list);
3801 kfree(params);
3802 }
3803
Johan Hedberg55af49a82014-07-02 17:37:26 +03003804 BT_DBG("All LE disabled connection parameters were removed");
3805}
3806
3807/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003808void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003809{
3810 struct hci_conn_params *params, *tmp;
3811
Johan Hedbergf6c63242014-08-15 21:06:59 +03003812 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3813 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003814
Johan Hedberga2f41a82014-07-04 12:37:19 +03003815 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003816
Andre Guedes15819a72014-02-03 13:56:18 -03003817 BT_DBG("All LE connection parameters were removed");
3818}
3819
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003820static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003821{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003822 if (status) {
3823 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003824
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003825 hci_dev_lock(hdev);
3826 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3827 hci_dev_unlock(hdev);
3828 return;
3829 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003830}
3831
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003832static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003833{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003834 /* General inquiry access code (GIAC) */
3835 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3836 struct hci_request req;
3837 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003838 int err;
3839
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003840 if (status) {
3841 BT_ERR("Failed to disable LE scanning: status %d", status);
3842 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003843 }
3844
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003845 switch (hdev->discovery.type) {
3846 case DISCOV_TYPE_LE:
3847 hci_dev_lock(hdev);
3848 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3849 hci_dev_unlock(hdev);
3850 break;
3851
3852 case DISCOV_TYPE_INTERLEAVED:
3853 hci_req_init(&req, hdev);
3854
3855 memset(&cp, 0, sizeof(cp));
3856 memcpy(&cp.lap, lap, sizeof(cp.lap));
3857 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3858 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3859
3860 hci_dev_lock(hdev);
3861
3862 hci_inquiry_cache_flush(hdev);
3863
3864 err = hci_req_run(&req, inquiry_complete);
3865 if (err) {
3866 BT_ERR("Inquiry request failed: err %d", err);
3867 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3868 }
3869
3870 hci_dev_unlock(hdev);
3871 break;
3872 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003873}
3874
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003875static void le_scan_disable_work(struct work_struct *work)
3876{
3877 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003878 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003879 struct hci_request req;
3880 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003881
3882 BT_DBG("%s", hdev->name);
3883
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003884 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003885
Andre Guedesb1efcc22014-02-26 20:21:40 -03003886 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003887
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003888 err = hci_req_run(&req, le_scan_disable_work_complete);
3889 if (err)
3890 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003891}
3892
Johan Hedberg8d972502014-02-28 12:54:14 +02003893static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3894{
3895 struct hci_dev *hdev = req->hdev;
3896
3897 /* If we're advertising or initiating an LE connection we can't
3898 * go ahead and change the random address at this time. This is
3899 * because the eventual initiator address used for the
3900 * subsequently created connection will be undefined (some
3901 * controllers use the new address and others the one we had
3902 * when the operation started).
3903 *
3904 * In this kind of scenario skip the update and let the random
3905 * address be updated at the next cycle.
3906 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003907 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003908 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3909 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003911 return;
3912 }
3913
3914 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3915}
3916
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003917int hci_update_random_address(struct hci_request *req, bool require_privacy,
3918 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003919{
3920 struct hci_dev *hdev = req->hdev;
3921 int err;
3922
3923	/* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003924	 * the current RPA has expired or something other than the
3925	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003926 */
3927 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003928 int to;
3929
3930 *own_addr_type = ADDR_LE_DEV_RANDOM;
3931
3932 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003933 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003934 return 0;
3935
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003936 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003937 if (err < 0) {
3938 BT_ERR("%s failed to generate new RPA", hdev->name);
3939 return err;
3940 }
3941
Johan Hedberg8d972502014-02-28 12:54:14 +02003942 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003943
3944 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3945 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3946
3947 return 0;
3948 }
3949
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003950	/* If privacy is required but no resolvable private address is
3951	 * available, use an unresolvable private address. This is
3952	 * useful for active scanning and non-connectable advertising.
3953 */
3954 if (require_privacy) {
3955 bdaddr_t urpa;
3956
3957 get_random_bytes(&urpa, 6);
3958 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3959
3960 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003961 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003962 return 0;
3963 }
3964
Johan Hedbergebd3a742014-02-23 19:42:21 +02003965	/* If forcing static address is in use or there is no public
3966	 * address, use the static address as the random address (but
3967	 * skip the HCI command if the current random address is
3968	 * already the static one).
3969 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003971 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3972 *own_addr_type = ADDR_LE_DEV_RANDOM;
3973 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3974 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3975 &hdev->static_addr);
3976 return 0;
3977 }
3978
3979 /* Neither privacy nor static address is being used so use a
3980 * public address.
3981 */
3982 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3983
3984 return 0;
3985}
3986
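/* Illustrative usage (sketch): when building a request for passive
 * scanning, pick the own-address type without forcing privacy:
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	param_cp.own_address_type = own_addr_type;
 *
 * param_cp is assumed to be a struct hci_cp_le_set_scan_param that is
 * added to the same request afterwards.
 */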
Johan Hedberga1f4c312014-02-27 14:05:41 +02003987/* Copy the Identity Address of the controller.
3988 *
3989 * If the controller has a public BD_ADDR, then by default use that one.
3990 * If this is a LE only controller without a public address, default to
3991 * the static random address.
3992 *
3993 * For debugging purposes it is possible to force controllers with a
3994 * public address to use the static random address instead.
3995 */
3996void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3997 u8 *bdaddr_type)
3998{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003999 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02004000 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4001 bacpy(bdaddr, &hdev->static_addr);
4002 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4003 } else {
4004 bacpy(bdaddr, &hdev->bdaddr);
4005 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4006 }
4007}
4008
David Herrmann9be0dab2012-04-22 14:39:57 +02004009/* Alloc HCI device */
4010struct hci_dev *hci_alloc_dev(void)
4011{
4012 struct hci_dev *hdev;
4013
Johan Hedberg27f70f32014-07-21 10:50:06 +03004014 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02004015 if (!hdev)
4016 return NULL;
4017
David Herrmannb1b813d2012-04-22 14:39:58 +02004018 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4019 hdev->esco_type = (ESCO_HV1);
4020 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07004021 hdev->num_iac = 0x01; /* One IAC support is mandatory */
4022 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02004023 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01004024 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4025 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02004026
David Herrmannb1b813d2012-04-22 14:39:58 +02004027 hdev->sniff_max_interval = 800;
4028 hdev->sniff_min_interval = 80;
4029
Marcel Holtmann3f959d42014-02-20 11:55:56 -08004030 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02004031 hdev->le_adv_min_interval = 0x0800;
4032 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004033 hdev->le_scan_interval = 0x0060;
4034 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004035 hdev->le_conn_min_interval = 0x0028;
4036 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004037 hdev->le_conn_latency = 0x0000;
4038 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004039
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004040 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004041 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004042 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4043 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004044
David Herrmannb1b813d2012-04-22 14:39:58 +02004045 mutex_init(&hdev->lock);
4046 mutex_init(&hdev->req_lock);
4047
4048 INIT_LIST_HEAD(&hdev->mgmt_pending);
4049 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004050 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004051 INIT_LIST_HEAD(&hdev->uuids);
4052 INIT_LIST_HEAD(&hdev->link_keys);
4053 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004054 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004055 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004056 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004057 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004058 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004059 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004060 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004061
4062 INIT_WORK(&hdev->rx_work, hci_rx_work);
4063 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4064 INIT_WORK(&hdev->tx_work, hci_tx_work);
4065 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004066
David Herrmannb1b813d2012-04-22 14:39:58 +02004067 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4068 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4069 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4070
David Herrmannb1b813d2012-04-22 14:39:58 +02004071 skb_queue_head_init(&hdev->rx_q);
4072 skb_queue_head_init(&hdev->cmd_q);
4073 skb_queue_head_init(&hdev->raw_q);
4074
4075 init_waitqueue_head(&hdev->req_wait_q);
4076
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004077 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004078
David Herrmannb1b813d2012-04-22 14:39:58 +02004079 hci_init_sysfs(hdev);
4080 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004081
4082 return hdev;
4083}
4084EXPORT_SYMBOL(hci_alloc_dev);
4085
4086/* Free HCI device */
4087void hci_free_dev(struct hci_dev *hdev)
4088{
David Herrmann9be0dab2012-04-22 14:39:57 +02004089 /* will free via device release */
4090 put_device(&hdev->dev);
4091}
4092EXPORT_SYMBOL(hci_free_dev);
4093
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094/* Register HCI device */
4095int hci_register_dev(struct hci_dev *hdev)
4096{
David Herrmannb1b813d2012-04-22 14:39:58 +02004097 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098
Marcel Holtmann74292d52014-07-06 15:50:27 +02004099 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 return -EINVAL;
4101
Mat Martineau08add512011-11-02 16:18:36 -07004102 /* Do not allow HCI_AMP devices to register at index 0,
4103 * so the index can be used as the AMP controller ID.
4104 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004105 switch (hdev->dev_type) {
4106 case HCI_BREDR:
4107 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4108 break;
4109 case HCI_AMP:
4110 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4111 break;
4112 default:
4113 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004115
Sasha Levin3df92b32012-05-27 22:36:56 +02004116 if (id < 0)
4117 return id;
4118
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119 sprintf(hdev->name, "hci%d", id);
4120 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004121
4122 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4123
Kees Cookd8537542013-07-03 15:04:57 -07004124 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4125 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004126 if (!hdev->workqueue) {
4127 error = -ENOMEM;
4128 goto err;
4129 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004130
Kees Cookd8537542013-07-03 15:04:57 -07004131 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4132 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004133 if (!hdev->req_workqueue) {
4134 destroy_workqueue(hdev->workqueue);
4135 error = -ENOMEM;
4136 goto err;
4137 }
4138
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004139 if (!IS_ERR_OR_NULL(bt_debugfs))
4140 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4141
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004142 dev_set_name(&hdev->dev, "%s", hdev->name);
4143
4144 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004145 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004146 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004148 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004149 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4150 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004151 if (hdev->rfkill) {
4152 if (rfkill_register(hdev->rfkill) < 0) {
4153 rfkill_destroy(hdev->rfkill);
4154 hdev->rfkill = NULL;
4155 }
4156 }
4157
Johan Hedberg5e130362013-09-13 08:58:17 +03004158 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4159 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4160
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004161 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004162 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004163
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004164 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004165 /* Assume BR/EDR support until proven otherwise (such as
4166		 * through reading supported features during init).
4167 */
4168 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4169 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004170
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004171 write_lock(&hci_dev_list_lock);
4172 list_add(&hdev->list, &hci_dev_list);
4173 write_unlock(&hci_dev_list_lock);
4174
Marcel Holtmann4a964402014-07-02 19:10:33 +02004175 /* Devices that are marked for raw-only usage are unconfigured
4176 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004177 */
4178 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004179 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004180
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004182 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183
Johan Hedberg19202572013-01-14 22:33:51 +02004184 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004185
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004187
David Herrmann33ca9542011-10-08 14:58:49 +02004188err_wqueue:
4189 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004190 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004191err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004192 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004193
David Herrmann33ca9542011-10-08 14:58:49 +02004194 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195}
4196EXPORT_SYMBOL(hci_register_dev);
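
/* A transport driver typically pairs hci_alloc_dev() with
 * hci_register_dev(), providing its callbacks first; on a registration
 * failure the device must be released again (illustrative sketch;
 * my_open, my_close and my_send are hypothetical driver functions):
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */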
4197
4198/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004199void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200{
Sasha Levin3df92b32012-05-27 22:36:56 +02004201 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004202
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004203 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204
Johan Hovold94324962012-03-15 14:48:41 +01004205 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4206
Sasha Levin3df92b32012-05-27 22:36:56 +02004207 id = hdev->id;
4208
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004209 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004211 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212
4213 hci_dev_do_close(hdev);
4214
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304215 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004216 kfree_skb(hdev->reassembly[i]);
4217
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004218 cancel_work_sync(&hdev->power_on);
4219
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004220 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004221 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4222 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004223 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004224 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004225 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004226 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004227
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004228 /* mgmt_index_removed should take care of emptying the
4229 * pending list */
4230 BUG_ON(!list_empty(&hdev->mgmt_pending));
4231
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 hci_notify(hdev, HCI_DEV_UNREG);
4233
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004234 if (hdev->rfkill) {
4235 rfkill_unregister(hdev->rfkill);
4236 rfkill_destroy(hdev->rfkill);
4237 }
4238
Johan Hedberg711eafe2014-08-08 09:32:52 +03004239 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004240
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004241 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004242
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004243 debugfs_remove_recursive(hdev->debugfs);
4244
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004245 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004246 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004247
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004248 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004249 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004250 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004251 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004252 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004253 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004254 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004255 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004256 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004257 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01004258 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004259 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004260
David Herrmanndc946bd2012-01-07 15:47:24 +01004261 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004262
4263 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264}
4265EXPORT_SYMBOL(hci_unregister_dev);
4266
4267/* Suspend HCI device */
4268int hci_suspend_dev(struct hci_dev *hdev)
4269{
4270 hci_notify(hdev, HCI_DEV_SUSPEND);
4271 return 0;
4272}
4273EXPORT_SYMBOL(hci_suspend_dev);
4274
4275/* Resume HCI device */
4276int hci_resume_dev(struct hci_dev *hdev)
4277{
4278 hci_notify(hdev, HCI_DEV_RESUME);
4279 return 0;
4280}
4281EXPORT_SYMBOL(hci_resume_dev);
4282
Marcel Holtmann75e05692014-11-02 08:15:38 +01004283/* Reset HCI device */
4284int hci_reset_dev(struct hci_dev *hdev)
4285{
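	/* Event code, parameter total length (1) and the hardware code */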
4286 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4287 struct sk_buff *skb;
4288
4289 skb = bt_skb_alloc(3, GFP_ATOMIC);
4290 if (!skb)
4291 return -ENOMEM;
4292
4293 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4294 memcpy(skb_put(skb, 3), hw_err, 3);
4295
4296 /* Send Hardware Error to upper stack */
4297 return hci_recv_frame(hdev, skb);
4298}
4299EXPORT_SYMBOL(hci_reset_dev);
4300
Marcel Holtmann76bca882009-11-18 00:40:39 +01004301/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004302int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004303{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004304 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004305 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004306 kfree_skb(skb);
4307 return -ENXIO;
4308 }
4309
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004310 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004311 bt_cb(skb)->incoming = 1;
4312
4313 /* Time stamp */
4314 __net_timestamp(skb);
4315
Marcel Holtmann76bca882009-11-18 00:40:39 +01004316 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004317 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004318
Marcel Holtmann76bca882009-11-18 00:40:39 +01004319 return 0;
4320}
4321EXPORT_SYMBOL(hci_recv_frame);
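
/* For example, a driver that has assembled a complete packet hands it
 * up with the packet type set (illustrative sketch):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */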
4322
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304323static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004324 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304325{
4326 int len = 0;
4327 int hlen = 0;
4328 int remain = count;
4329 struct sk_buff *skb;
4330 struct bt_skb_cb *scb;
4331
4332 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004333 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304334 return -EILSEQ;
4335
4336 skb = hdev->reassembly[index];
4337
4338 if (!skb) {
4339 switch (type) {
4340 case HCI_ACLDATA_PKT:
4341 len = HCI_MAX_FRAME_SIZE;
4342 hlen = HCI_ACL_HDR_SIZE;
4343 break;
4344 case HCI_EVENT_PKT:
4345 len = HCI_MAX_EVENT_SIZE;
4346 hlen = HCI_EVENT_HDR_SIZE;
4347 break;
4348 case HCI_SCODATA_PKT:
4349 len = HCI_MAX_SCO_SIZE;
4350 hlen = HCI_SCO_HDR_SIZE;
4351 break;
4352 }
4353
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004354 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304355 if (!skb)
4356 return -ENOMEM;
4357
4358 scb = (void *) skb->cb;
4359 scb->expect = hlen;
4360 scb->pkt_type = type;
4361
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304362 hdev->reassembly[index] = skb;
4363 }
4364
4365 while (count) {
4366 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004367 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304368
4369 memcpy(skb_put(skb, len), data, len);
4370
4371 count -= len;
4372 data += len;
4373 scb->expect -= len;
4374 remain = count;
4375
4376 switch (type) {
4377 case HCI_EVENT_PKT:
4378 if (skb->len == HCI_EVENT_HDR_SIZE) {
4379 struct hci_event_hdr *h = hci_event_hdr(skb);
4380 scb->expect = h->plen;
4381
4382 if (skb_tailroom(skb) < scb->expect) {
4383 kfree_skb(skb);
4384 hdev->reassembly[index] = NULL;
4385 return -ENOMEM;
4386 }
4387 }
4388 break;
4389
4390 case HCI_ACLDATA_PKT:
4391 if (skb->len == HCI_ACL_HDR_SIZE) {
4392 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4393 scb->expect = __le16_to_cpu(h->dlen);
4394
4395 if (skb_tailroom(skb) < scb->expect) {
4396 kfree_skb(skb);
4397 hdev->reassembly[index] = NULL;
4398 return -ENOMEM;
4399 }
4400 }
4401 break;
4402
4403 case HCI_SCODATA_PKT:
4404 if (skb->len == HCI_SCO_HDR_SIZE) {
4405 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4406 scb->expect = h->dlen;
4407
4408 if (skb_tailroom(skb) < scb->expect) {
4409 kfree_skb(skb);
4410 hdev->reassembly[index] = NULL;
4411 return -ENOMEM;
4412 }
4413 }
4414 break;
4415 }
4416
4417 if (scb->expect == 0) {
4418 /* Complete frame */
4419
4420 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004421 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304422
4423 hdev->reassembly[index] = NULL;
4424 return remain;
4425 }
4426 }
4427
4428 return remain;
4429}
4430
Suraj Sumangala99811512010-07-14 13:02:19 +05304431#define STREAM_REASSEMBLY 0
4432
4433int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4434{
4435 int type;
4436 int rem = 0;
4437
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004438 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304439 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4440
4441 if (!skb) {
4442 struct { char type; } *pkt;
4443
4444 /* Start of the frame */
4445 pkt = data;
4446 type = pkt->type;
4447
4448 data++;
4449 count--;
4450 } else
4451 type = bt_cb(skb)->pkt_type;
4452
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004453 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004454 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304455 if (rem < 0)
4456 return rem;
4457
4458 data += (count - rem);
4459 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004460 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304461
4462 return rem;
4463}
4464EXPORT_SYMBOL(hci_recv_stream_fragment);
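
/* For example, a UART-style driver can feed the raw byte stream in as
 * it arrives and let the reassembly above split it into packets
 * (illustrative sketch; buf and count come from a hypothetical
 * receive path):
 *
 *	hci_recv_stream_fragment(hdev, buf, count);
 *
 * The packet type indicator bytes in the stream drive the reassembly.
 */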
4465
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466/* ---- Interface to upper protocols ---- */
4467
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468int hci_register_cb(struct hci_cb *cb)
4469{
4470 BT_DBG("%p name %s", cb, cb->name);
4471
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004472 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004474 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475
4476 return 0;
4477}
4478EXPORT_SYMBOL(hci_register_cb);
4479
4480int hci_unregister_cb(struct hci_cb *cb)
4481{
4482 BT_DBG("%p name %s", cb, cb->name);
4483
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004484 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004486 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487
4488 return 0;
4489}
4490EXPORT_SYMBOL(hci_unregister_cb);
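
/* An upper protocol registers its callback table once and removes it
 * on teardown (illustrative sketch; "my_proto" is a hypothetical
 * protocol name):
 *
 *	static struct hci_cb my_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */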
4491
Marcel Holtmann51086992013-10-10 14:54:19 -07004492static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004494 int err;
4495
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004496 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004498 /* Time stamp */
4499 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004501 /* Send copy to monitor */
4502 hci_send_to_monitor(hdev, skb);
4503
4504 if (atomic_read(&hdev->promisc)) {
4505 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004506 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507 }
4508
4509 /* Get rid of skb owner, prior to sending to the driver. */
4510 skb_orphan(skb);
4511
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004512 err = hdev->send(hdev, skb);
4513 if (err < 0) {
4514 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4515 kfree_skb(skb);
4516 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517}
4518
Johan Hedberg3119ae92013-03-05 20:37:44 +02004519void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4520{
4521 skb_queue_head_init(&req->cmd_q);
4522 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004523 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004524}
4525
4526int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4527{
4528 struct hci_dev *hdev = req->hdev;
4529 struct sk_buff *skb;
4530 unsigned long flags;
4531
4532 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4533
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004534 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004535 * commands queued on the HCI request queue.
4536 */
4537 if (req->err) {
4538 skb_queue_purge(&req->cmd_q);
4539 return req->err;
4540 }
4541
Johan Hedberg3119ae92013-03-05 20:37:44 +02004542 /* Do not allow empty requests */
4543 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004544 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004545
4546 skb = skb_peek_tail(&req->cmd_q);
4547 bt_cb(skb)->req.complete = complete;
4548
4549 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4550 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4551 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4552
4553 queue_work(hdev->workqueue, &hdev->cmd_work);
4554
4555 return 0;
4556}
4557
Marcel Holtmann899de762014-07-11 05:51:58 +02004558bool hci_req_pending(struct hci_dev *hdev)
4559{
4560 return (hdev->req_status == HCI_REQ_PEND);
4561}
4562
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004563static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004564 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565{
4566 int len = HCI_COMMAND_HDR_SIZE + plen;
4567 struct hci_command_hdr *hdr;
4568 struct sk_buff *skb;
4569
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004571 if (!skb)
4572 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573
4574 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004575 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576 hdr->plen = plen;
4577
4578 if (plen)
4579 memcpy(skb_put(skb, plen), param, plen);
4580
4581 BT_DBG("skb len %d", skb->len);
4582
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004583 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004584 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004585
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004586 return skb;
4587}
4588
4589/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004590int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4591 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004592{
4593 struct sk_buff *skb;
4594
4595 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4596
4597 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4598 if (!skb) {
4599 BT_ERR("%s no memory for command", hdev->name);
4600 return -ENOMEM;
4601 }
4602
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004603 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004604 * single-command requests.
4605 */
4606 bt_cb(skb)->req.start = true;
4607
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004609 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610
4611 return 0;
4612}
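
/* For example, a stand-alone HCI Reset can be issued as:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */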
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613
Johan Hedberg71c76a12013-03-05 20:37:46 +02004614/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004615void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4616 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004617{
4618 struct hci_dev *hdev = req->hdev;
4619 struct sk_buff *skb;
4620
4621 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4622
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004623 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004624 * queueing the HCI command. We can simply return.
4625 */
4626 if (req->err)
4627 return;
4628
Johan Hedberg71c76a12013-03-05 20:37:46 +02004629 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4630 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004631 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4632 hdev->name, opcode);
4633 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004634 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004635 }
4636
4637 if (skb_queue_empty(&req->cmd_q))
4638 bt_cb(skb)->req.start = true;
4639
Johan Hedberg02350a72013-04-03 21:50:29 +03004640 bt_cb(skb)->req.event = event;
4641
Johan Hedberg71c76a12013-03-05 20:37:46 +02004642 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004643}
4644
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004645void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4646 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004647{
4648 hci_req_add_ev(req, opcode, plen, param, 0);
4649}
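
/* For example, a request that batches commands behind a single
 * completion callback (illustrative sketch; my_complete is a
 * hypothetical hci_req_complete_t):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	hci_req_run(&req, my_complete);
 */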
4650
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004652void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653{
4654 struct hci_command_hdr *hdr;
4655
4656 if (!hdev->sent_cmd)
4657 return NULL;
4658
4659 hdr = (void *) hdev->sent_cmd->data;
4660
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004661 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662 return NULL;
4663
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004664 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665
4666 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4667}
4668
4669/* Send ACL data */
4670static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4671{
4672 struct hci_acl_hdr *hdr;
4673 int len = skb->len;
4674
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004675 skb_push(skb, HCI_ACL_HDR_SIZE);
4676 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004677 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004678 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4679 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680}
4681
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004682static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004683 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004685 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686 struct hci_dev *hdev = conn->hdev;
4687 struct sk_buff *list;
4688
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004689 skb->len = skb_headlen(skb);
4690 skb->data_len = 0;
4691
4692 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004693
4694 switch (hdev->dev_type) {
4695 case HCI_BREDR:
4696 hci_add_acl_hdr(skb, conn->handle, flags);
4697 break;
4698 case HCI_AMP:
4699 hci_add_acl_hdr(skb, chan->handle, flags);
4700 break;
4701 default:
4702 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4703 return;
4704 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004705
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004706 list = skb_shinfo(skb)->frag_list;
4707 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708		/* Non-fragmented */
4709 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4710
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004711 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712 } else {
4713 /* Fragmented */
4714 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4715
4716 skb_shinfo(skb)->frag_list = NULL;
4717
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004718		/* Queue all fragments atomically. We need to use
4719		 * spin_lock_bh here because with 6LoWPAN links this
4720		 * function can be called from softirq context, and using a
4721		 * normal spin lock could cause deadlocks.
4722		 */
4723 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004725 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004726
4727 flags &= ~ACL_START;
4728 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729 do {
4730 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004731
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004732 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004733 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734
4735 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4736
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004737 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738 } while (list);
4739
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004740 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004742}
4743
4744void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4745{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004746 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004747
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004748 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004749
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004750 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004752 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754
4755/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004756void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757{
4758 struct hci_dev *hdev = conn->hdev;
4759 struct hci_sco_hdr hdr;
4760
4761 BT_DBG("%s len %d", hdev->name, skb->len);
4762
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004763 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764 hdr.dlen = skb->len;
4765
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004766 skb_push(skb, HCI_SCO_HDR_SIZE);
4767 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004768 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004770 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004771
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004773 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004774}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775
4776/* ---- HCI TX task (outgoing data) ---- */
4777
4778/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004779static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4780 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781{
4782 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004783 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004784 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004786	/* We don't have to lock the device here. Connections are
Linus Torvalds1da177e2005-04-16 15:20:36 -07004787	 * always added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004788
4789 rcu_read_lock();
4790
4791 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004792 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004794
4795 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4796 continue;
4797
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798 num++;
4799
4800 if (c->sent < min) {
4801 min = c->sent;
4802 conn = c;
4803 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004804
4805 if (hci_conn_num(hdev, type) == num)
4806 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807 }
4808
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004809 rcu_read_unlock();
4810
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004812 int cnt, q;
4813
4814 switch (conn->type) {
4815 case ACL_LINK:
4816 cnt = hdev->acl_cnt;
4817 break;
4818 case SCO_LINK:
4819 case ESCO_LINK:
4820 cnt = hdev->sco_cnt;
4821 break;
4822 case LE_LINK:
4823 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4824 break;
4825 default:
4826 cnt = 0;
4827 BT_ERR("Unknown link type");
4828 }
4829
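		/* Give the chosen connection an equal share of the free
		 * buffer quota, but always allow at least one packet.
		 */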
4830 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 *quote = q ? q : 1;
4832 } else
4833 *quote = 0;
4834
4835 BT_DBG("conn %p quote %d", conn, *quote);
4836 return conn;
4837}
4838
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004839static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840{
4841 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004842 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004843
Ville Tervobae1f5d92011-02-10 22:38:53 -03004844 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004846 rcu_read_lock();
4847
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004849 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004850 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004851 BT_ERR("%s killing stalled connection %pMR",
4852 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004853 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854 }
4855 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004856
4857 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858}
4859
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004860static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4861 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004862{
4863 struct hci_conn_hash *h = &hdev->conn_hash;
4864 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004865 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004866 struct hci_conn *conn;
4867 int cnt, q, conn_num = 0;
4868
4869 BT_DBG("%s", hdev->name);
4870
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004871 rcu_read_lock();
4872
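	/* Pick the channel with the highest queued skb priority; among
	 * equal priorities, prefer the connection that has sent the
	 * least.
	 */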
4873 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004874 struct hci_chan *tmp;
4875
4876 if (conn->type != type)
4877 continue;
4878
4879 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4880 continue;
4881
4882 conn_num++;
4883
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004884 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004885 struct sk_buff *skb;
4886
4887 if (skb_queue_empty(&tmp->data_q))
4888 continue;
4889
4890 skb = skb_peek(&tmp->data_q);
4891 if (skb->priority < cur_prio)
4892 continue;
4893
4894 if (skb->priority > cur_prio) {
4895 num = 0;
4896 min = ~0;
4897 cur_prio = skb->priority;
4898 }
4899
4900 num++;
4901
4902 if (conn->sent < min) {
4903 min = conn->sent;
4904 chan = tmp;
4905 }
4906 }
4907
4908 if (hci_conn_num(hdev, type) == conn_num)
4909 break;
4910 }
4911
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004912 rcu_read_unlock();
4913
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004914 if (!chan)
4915 return NULL;
4916
4917 switch (chan->conn->type) {
4918 case ACL_LINK:
4919 cnt = hdev->acl_cnt;
4920 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004921 case AMP_LINK:
4922 cnt = hdev->block_cnt;
4923 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004924 case SCO_LINK:
4925 case ESCO_LINK:
4926 cnt = hdev->sco_cnt;
4927 break;
4928 case LE_LINK:
4929 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4930 break;
4931 default:
4932 cnt = 0;
4933 BT_ERR("Unknown link type");
4934 }
4935
4936 q = cnt / num;
4937 *quote = q ? q : 1;
4938 BT_DBG("chan %p quote %d", chan, *quote);
4939 return chan;
4940}
4941
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004942static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4943{
4944 struct hci_conn_hash *h = &hdev->conn_hash;
4945 struct hci_conn *conn;
4946 int num = 0;
4947
4948 BT_DBG("%s", hdev->name);
4949
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004950 rcu_read_lock();
4951
4952 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004953 struct hci_chan *chan;
4954
4955 if (conn->type != type)
4956 continue;
4957
4958 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4959 continue;
4960
4961 num++;
4962
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004963 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004964 struct sk_buff *skb;
4965
4966 if (chan->sent) {
4967 chan->sent = 0;
4968 continue;
4969 }
4970
4971 if (skb_queue_empty(&chan->data_q))
4972 continue;
4973
4974 skb = skb_peek(&chan->data_q);
4975 if (skb->priority >= HCI_PRIO_MAX - 1)
4976 continue;
4977
4978 skb->priority = HCI_PRIO_MAX - 1;
4979
4980 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004981 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004982 }
4983
4984 if (hci_conn_num(hdev, type) == num)
4985 break;
4986 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004987
4988 rcu_read_unlock();
4989
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004990}
4991
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004992static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4993{
4994 /* Calculate count of blocks used by this packet */
4995 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4996}
4997
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004998static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999{
Marcel Holtmann4a964402014-07-02 19:10:33 +02005000 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001		/* ACL tx timeout must be longer than the maximum
5002 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005003 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005004 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005005 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005007}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005009static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005010{
5011 unsigned int cnt = hdev->acl_cnt;
5012 struct hci_chan *chan;
5013 struct sk_buff *skb;
5014 int quote;
5015
5016 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005017
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005018 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005019 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005020 u32 priority = (skb_peek(&chan->data_q))->priority;
5021 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005022 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005023 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005024
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005025 /* Stop if priority has changed */
5026 if (skb->priority < priority)
5027 break;
5028
5029 skb = skb_dequeue(&chan->data_q);
5030
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005031 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03005032 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005033
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005034 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 hdev->acl_last_tx = jiffies;
5036
5037 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005038 chan->sent++;
5039 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040 }
5041 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005042
5043 if (cnt != hdev->acl_cnt)
5044 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045}
5046
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005047static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005048{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005049 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005050 struct hci_chan *chan;
5051 struct sk_buff *skb;
5052 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005053 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005054
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005055 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005056
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005057 BT_DBG("%s", hdev->name);
5058
5059 if (hdev->dev_type == HCI_AMP)
5060 type = AMP_LINK;
5061 else
5062 type = ACL_LINK;
5063
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005064 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005065 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005066 u32 priority = (skb_peek(&chan->data_q))->priority;
5067 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5068 int blocks;
5069
5070 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005071 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005072
5073 /* Stop if priority has changed */
5074 if (skb->priority < priority)
5075 break;
5076
5077 skb = skb_dequeue(&chan->data_q);
5078
5079 blocks = __get_blocks(hdev, skb);
5080 if (blocks > hdev->block_cnt)
5081 return;
5082
5083 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005084 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005085
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005086 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005087 hdev->acl_last_tx = jiffies;
5088
5089 hdev->block_cnt -= blocks;
5090 quote -= blocks;
5091
5092 chan->sent += blocks;
5093 chan->conn->sent += blocks;
5094 }
5095 }
5096
5097 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005098 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005099}
5100
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005101static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005102{
5103 BT_DBG("%s", hdev->name);
5104
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005105 /* No ACL link over BR/EDR controller */
5106 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5107 return;
5108
5109 /* No AMP link over AMP controller */
5110 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005111 return;
5112
5113 switch (hdev->flow_ctl_mode) {
5114 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5115 hci_sched_acl_pkt(hdev);
5116 break;
5117
5118 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5119 hci_sched_acl_blk(hdev);
5120 break;
5121 }
5122}
5123
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005125static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126{
5127 struct hci_conn *conn;
5128 struct sk_buff *skb;
5129 int quote;
5130
5131 BT_DBG("%s", hdev->name);
5132
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005133 if (!hci_conn_num(hdev, SCO_LINK))
5134 return;
5135
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5137 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5138 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005139 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140
5141 conn->sent++;
5142 if (conn->sent == ~0)
5143 conn->sent = 0;
5144 }
5145 }
5146}
5147
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005148static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005149{
5150 struct hci_conn *conn;
5151 struct sk_buff *skb;
5152 int quote;
5153
5154 BT_DBG("%s", hdev->name);
5155
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005156 if (!hci_conn_num(hdev, ESCO_LINK))
5157 return;
5158
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005159 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5160 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005161 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5162 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005163 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005164
5165 conn->sent++;
5166 if (conn->sent == ~0)
5167 conn->sent = 0;
5168 }
5169 }
5170}
5171
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005172static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005173{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005174 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005175 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005176 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005177
5178 BT_DBG("%s", hdev->name);
5179
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005180 if (!hci_conn_num(hdev, LE_LINK))
5181 return;
5182
Marcel Holtmann4a964402014-07-02 19:10:33 +02005183 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005184		/* LE tx timeout must be longer than the maximum
5185 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005186 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005187 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005188 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005189 }
5190
5191 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005192 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005193 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005194 u32 priority = (skb_peek(&chan->data_q))->priority;
5195 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005196 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005197 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005198
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005199 /* Stop if priority has changed */
5200 if (skb->priority < priority)
5201 break;
5202
5203 skb = skb_dequeue(&chan->data_q);
5204
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005205 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005206 hdev->le_last_tx = jiffies;
5207
5208 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005209 chan->sent++;
5210 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005211 }
5212 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005213
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005214 if (hdev->le_pkts)
5215 hdev->le_cnt = cnt;
5216 else
5217 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005218
5219 if (cnt != tmp)
5220 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005221}
5222
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005223static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005225 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 struct sk_buff *skb;
5227
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005228 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005229 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230
Marcel Holtmann52de5992013-09-03 18:08:38 -07005231 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5232 /* Schedule queues and send stuff to HCI driver */
5233 hci_sched_acl(hdev);
5234 hci_sched_sco(hdev);
5235 hci_sched_esco(hdev);
5236 hci_sched_le(hdev);
5237 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005238
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 /* Send next queued raw (unknown type) packet */
5240 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005241 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242}
5243
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005244/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245
5246/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005247static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248{
5249 struct hci_acl_hdr *hdr = (void *) skb->data;
5250 struct hci_conn *conn;
5251 __u16 handle, flags;
5252
5253 skb_pull(skb, HCI_ACL_HDR_SIZE);
5254
5255 handle = __le16_to_cpu(hdr->handle);
5256 flags = hci_flags(handle);
5257 handle = hci_handle(handle);
5258
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005259 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005260 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261
5262 hdev->stat.acl_rx++;
5263
5264 hci_dev_lock(hdev);
5265 conn = hci_conn_hash_lookup_handle(hdev, handle);
5266 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005267
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005269 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005270
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005272 l2cap_recv_acldata(conn, skb, flags);
5273 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005275 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005276 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277 }
5278
5279 kfree_skb(skb);
5280}
5281
5282/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005283static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005284{
5285 struct hci_sco_hdr *hdr = (void *) skb->data;
5286 struct hci_conn *conn;
5287 __u16 handle;
5288
5289 skb_pull(skb, HCI_SCO_HDR_SIZE);
5290
5291 handle = __le16_to_cpu(hdr->handle);
5292
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005293 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005294
5295 hdev->stat.sco_rx++;
5296
5297 hci_dev_lock(hdev);
5298 conn = hci_conn_hash_lookup_handle(hdev, handle);
5299 hci_dev_unlock(hdev);
5300
5301 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005303 sco_recv_scodata(conn, skb);
5304 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005306 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005307 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308 }
5309
5310 kfree_skb(skb);
5311}
5312
Johan Hedberg9238f362013-03-05 20:37:48 +02005313static bool hci_req_is_complete(struct hci_dev *hdev)
5314{
5315 struct sk_buff *skb;
5316
5317 skb = skb_peek(&hdev->cmd_q);
5318 if (!skb)
5319 return true;
5320
5321 return bt_cb(skb)->req.start;
5322}
5323
Johan Hedberg42c6b122013-03-05 20:37:49 +02005324static void hci_resend_last(struct hci_dev *hdev)
5325{
5326 struct hci_command_hdr *sent;
5327 struct sk_buff *skb;
5328 u16 opcode;
5329
5330 if (!hdev->sent_cmd)
5331 return;
5332
5333 sent = (void *) hdev->sent_cmd->data;
5334 opcode = __le16_to_cpu(sent->opcode);
5335 if (opcode == HCI_OP_RESET)
5336 return;
5337
5338 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5339 if (!skb)
5340 return;
5341
5342 skb_queue_head(&hdev->cmd_q, skb);
5343 queue_work(hdev->workqueue, &hdev->cmd_work);
5344}
5345
Johan Hedberg9238f362013-03-05 20:37:48 +02005346void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5347{
5348 hci_req_complete_t req_complete = NULL;
5349 struct sk_buff *skb;
5350 unsigned long flags;
5351
5352 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5353
Johan Hedberg42c6b122013-03-05 20:37:49 +02005354 /* If the completed command doesn't match the last one that was
5355	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005356 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005357 if (!hci_sent_cmd_data(hdev, opcode)) {
5358 /* Some CSR based controllers generate a spontaneous
5359 * reset complete event during init and any pending
5360 * command will never be completed. In such a case we
5361 * need to resend whatever was the last sent
5362 * command.
5363 */
5364 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5365 hci_resend_last(hdev);
5366
Johan Hedberg9238f362013-03-05 20:37:48 +02005367 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005368 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005369
5370	/* If the command succeeded and there are still more commands
5371	 * in this request, the request is not yet complete.
5372 */
5373 if (!status && !hci_req_is_complete(hdev))
5374 return;
5375
5376	/* If this was the last command in a request, the complete
5377 * callback would be found in hdev->sent_cmd instead of the
5378 * command queue (hdev->cmd_q).
5379 */
5380 if (hdev->sent_cmd) {
5381 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005382
5383 if (req_complete) {
5384 /* We must set the complete callback to NULL to
5385 * avoid calling the callback more than once if
5386 * this function gets called again.
5387 */
5388 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5389
Johan Hedberg9238f362013-03-05 20:37:48 +02005390 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005391 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005392 }
5393
5394 /* Remove all pending commands belonging to this request */
5395 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5396 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5397 if (bt_cb(skb)->req.start) {
5398 __skb_queue_head(&hdev->cmd_q, skb);
5399 break;
5400 }
5401
5402 req_complete = bt_cb(skb)->req.complete;
5403 kfree_skb(skb);
5404 }
5405 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5406
5407call_complete:
5408 if (req_complete)
5409 req_complete(hdev, status);
5410}
5411
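/* RX work: drain hdev->rx_q, mirror each frame to the monitor socket
 * and, in promiscuous mode, to HCI sockets, then dispatch it by
 * packet type. All frames are dropped while the device is bound to a
 * user channel, and data packets are dropped while HCI_INIT is set.
 */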
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005412static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005414 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 struct sk_buff *skb;
5416
5417 BT_DBG("%s", hdev->name);
5418
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005420 /* Send copy to monitor */
5421 hci_send_to_monitor(hdev, skb);
5422
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 if (atomic_read(&hdev->promisc)) {
5424 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005425 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426 }
5427
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005428 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429 kfree_skb(skb);
5430 continue;
5431 }
5432
5433 if (test_bit(HCI_INIT, &hdev->flags)) {
5434			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005435 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 case HCI_ACLDATA_PKT:
5437 case HCI_SCODATA_PKT:
5438 kfree_skb(skb);
5439 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005441 }
5442
5443 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005444 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005446 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 hci_event_packet(hdev, skb);
5448 break;
5449
5450 case HCI_ACLDATA_PKT:
5451 BT_DBG("%s ACL data packet", hdev->name);
5452 hci_acldata_packet(hdev, skb);
5453 break;
5454
5455 case HCI_SCODATA_PKT:
5456 BT_DBG("%s SCO data packet", hdev->name);
5457 hci_scodata_packet(hdev, skb);
5458 break;
5459
5460 default:
5461 kfree_skb(skb);
5462 break;
5463 }
5464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465}
5466
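/* TX side of the command queue. hdev->cmd_cnt tracks the command
 * credits currently granted by the controller; a command is sent only
 * when at least one credit is available. A clone of the sent frame is
 * kept in hdev->sent_cmd so that completion events can be matched
 * against it, and cmd_timer serves as a watchdog for commands that
 * never complete (it is not armed while a reset is pending).
 */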
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005467static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005469 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470 struct sk_buff *skb;
5471
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005472 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5473 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005476 if (atomic_read(&hdev->cmd_cnt)) {
5477 skb = skb_dequeue(&hdev->cmd_q);
5478 if (!skb)
5479 return;
5480
Wei Yongjun7585b972009-02-25 18:29:52 +08005481 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005483 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005484 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005486 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005487 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005488 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005489 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005490 schedule_delayed_work(&hdev->cmd_timer,
5491 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492 } else {
5493 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005494 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495 }
5496 }
5497}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005498
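/* Queue an LE Set Scan Enable command that disables scanning. Also
 * used before reprogramming the white list, since the controller does
 * not allow white list changes while scanning (see
 * hci_req_add_le_passive_scan).
 */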
5499void hci_req_add_le_scan_disable(struct hci_request *req)
5500{
5501 struct hci_cp_le_set_scan_enable cp;
5502
5503 memset(&cp, 0, sizeof(cp));
5504 cp.enable = LE_SCAN_DISABLE;
5505 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5506}
Andre Guedesa4790db2014-02-26 20:21:47 -03005507
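/* Queue an LE Add Device To White List command for the address and
 * address type of @params.
 */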
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005508static void add_to_white_list(struct hci_request *req,
5509 struct hci_conn_params *params)
5510{
5511 struct hci_cp_le_add_to_white_list cp;
5512
5513 cp.bdaddr_type = params->addr_type;
5514 bacpy(&cp.bdaddr, &params->addr);
5515
5516 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5517}
5518
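/* Bring the controller white list in sync with the pending LE
 * connection and report lists, in three passes: first remove entries
 * that are no longer on either list, then add missing entries from
 * pend_le_conns, then add missing entries from pend_le_reports.
 *
 * Returns the scan filter policy to use: 0x01 (use white list) when
 * the white list covers all pending devices, or 0x00 (accept all
 * advertising) when it is too small or when a device uses an RPA that
 * the controller cannot match.
 */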
5519static u8 update_white_list(struct hci_request *req)
5520{
5521 struct hci_dev *hdev = req->hdev;
5522 struct hci_conn_params *params;
5523 struct bdaddr_list *b;
5524	u8 white_list_entries = 0;
5525
5526 /* Go through the current white list programmed into the
5527 * controller one by one and check if that address is still
5528 * in the list of pending connections or list of devices to
5529 * report. If not present in either list, then queue the
5530 * command to remove it from the controller.
5531 */
5532 list_for_each_entry(b, &hdev->le_white_list, list) {
5533 struct hci_cp_le_del_from_white_list cp;
5534
5535 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5536 &b->bdaddr, b->bdaddr_type) ||
5537 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5538 &b->bdaddr, b->bdaddr_type)) {
5539 white_list_entries++;
5540 continue;
5541 }
5542
5543 cp.bdaddr_type = b->bdaddr_type;
5544 bacpy(&cp.bdaddr, &b->bdaddr);
5545
5546 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5547 sizeof(cp), &cp);
5548 }
5549
5550	/* Since all white list entries that are no longer valid have been
5551 * removed, walk through the list of pending connections
5552 * and ensure that any new device gets programmed into
5553 * the controller.
5554 *
5555	 * If the list of devices is larger than the number of
5556	 * available white list entries in the controller, then
5557	 * just abort and return a filter policy value that does
5558	 * not use the white list.
5559 */
5560 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5561 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5562 &params->addr, params->addr_type))
5563 continue;
5564
5565 if (white_list_entries >= hdev->le_white_list_size) {
5566 /* Select filter policy to accept all advertising */
5567 return 0x00;
5568 }
5569
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005570 if (hci_find_irk_by_addr(hdev, &params->addr,
5571 params->addr_type)) {
5572				/* White list cannot be used with RPAs */
5573 return 0x00;
5574 }
5575
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005576 white_list_entries++;
5577 add_to_white_list(req, params);
5578 }
5579
5580 /* After adding all new pending connections, walk through
5581 * the list of pending reports and also add these to the
5582 * white list if there is still space.
5583 */
5584 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5585 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5586 &params->addr, params->addr_type))
5587 continue;
5588
5589 if (white_list_entries >= hdev->le_white_list_size) {
5590 /* Select filter policy to accept all advertising */
5591 return 0x00;
5592 }
5593
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005594 if (hci_find_irk_by_addr(hdev, &params->addr,
5595 params->addr_type)) {
5596				/* White list cannot be used with RPAs */
5597 return 0x00;
5598 }
5599
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005600 white_list_entries++;
5601 add_to_white_list(req, params);
5602 }
5603
5604 /* Select filter policy to use white list */
5605 return 0x01;
5606}
5607
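/* Queue the commands for LE passive scanning: update the white list
 * first, then set the scan parameters (passive type, interval, window
 * and own address type) and enable scanning with duplicate filtering,
 * using the filter policy returned by update_white_list().
 */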
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005608void hci_req_add_le_passive_scan(struct hci_request *req)
5609{
5610 struct hci_cp_le_set_scan_param param_cp;
5611 struct hci_cp_le_set_scan_enable enable_cp;
5612 struct hci_dev *hdev = req->hdev;
5613 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005614 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005615
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005616	/* Set require_privacy to false since no SCAN_REQ are sent
5617 * during passive scanning. Not using an unresolvable address
5618 * here is important so that peer devices using direct
5619 * advertising with our address will be correctly reported
5620 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005621 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005622 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005623 return;
5624
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005625 /* Adding or removing entries from the white list must
5626 * happen before enabling scanning. The controller does
5627 * not allow white list modification while scanning.
5628 */
5629 filter_policy = update_white_list(req);
5630
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005631 memset(&param_cp, 0, sizeof(param_cp));
5632 param_cp.type = LE_SCAN_PASSIVE;
5633 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5634 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5635 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005636 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005637 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5638 &param_cp);
5639
5640 memset(&enable_cp, 0, sizeof(enable_cp));
5641 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005642 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005643 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5644 &enable_cp);
5645}
5646
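/* Illustrative use of the helper above, mirroring what
 * hci_update_background_scan() does below:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_passive_scan(&req);
 *	err = hci_req_run(&req, update_background_scan_complete);
 */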
Andre Guedesa4790db2014-02-26 20:21:47 -03005647static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5648{
5649 if (status)
5650 BT_DBG("HCI request failed to update background scanning: "
5651 "status 0x%2.2x", status);
5652}
5653
5654/* This function controls the background scanning based on hdev->pend_le_conns
5655 * list. If there are pending LE connections, we start the background scanning,
5656 * otherwise we stop it.
5657 *
5658 * This function requires the caller to hold hdev->lock.
5659 */
5660void hci_update_background_scan(struct hci_dev *hdev)
5661{
Andre Guedesa4790db2014-02-26 20:21:47 -03005662 struct hci_request req;
5663 struct hci_conn *conn;
5664 int err;
5665
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005666 if (!test_bit(HCI_UP, &hdev->flags) ||
5667 test_bit(HCI_INIT, &hdev->flags) ||
5668 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005669 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005670 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005671 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005672 return;
5673
Johan Hedberga70f4b52014-07-07 15:19:50 +03005674 /* No point in doing scanning if LE support hasn't been enabled */
5675 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5676 return;
5677
Johan Hedbergae23ada2014-07-07 13:24:59 +03005678 /* If discovery is active don't interfere with it */
5679 if (hdev->discovery.state != DISCOVERY_STOPPED)
5680 return;
5681
Andre Guedesa4790db2014-02-26 20:21:47 -03005682 hci_req_init(&req, hdev);
5683
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005684 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005685 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005686		/* If there are no pending LE connections or devices
5687 * to be scanned for, we should stop the background
5688 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005689 */
5690
5691 /* If controller is not scanning we are done. */
5692 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5693 return;
5694
5695 hci_req_add_le_scan_disable(&req);
5696
5697 BT_DBG("%s stopping background scanning", hdev->name);
5698 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005699		/* If there is at least one pending LE connection or device to
5700		 * report, we should keep the background scan running.
5701 */
5702
Andre Guedesa4790db2014-02-26 20:21:47 -03005703 /* If controller is connecting, we should not start scanning
5704 * since some controllers are not able to scan and connect at
5705 * the same time.
5706 */
5707 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5708 if (conn)
5709 return;
5710
Andre Guedes4340a122014-03-10 18:26:24 -03005711 /* If controller is currently scanning, we stop it to ensure we
5712 * don't miss any advertising (due to duplicates filter).
5713 */
5714 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5715 hci_req_add_le_scan_disable(&req);
5716
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005717 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005718
5719 BT_DBG("%s starting background scanning", hdev->name);
5720 }
5721
5722 err = hci_req_run(&req, update_background_scan_complete);
5723 if (err)
5724 BT_ERR("Failed to run HCI request: err %d", err);
5725}
Johan Hedberg432df052014-08-01 11:13:31 +03005726
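/* Return true if any device on the BR/EDR whitelist lacks a fully
 * established ACL connection (no connection at all, or one that is
 * neither in BT_CONNECTED nor BT_CONFIG state). In that case page
 * scanning must stay enabled so that the device can reconnect.
 */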
Johan Hedberg22f433d2014-08-01 11:13:32 +03005727static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5728{
5729 struct bdaddr_list *b;
5730
5731 list_for_each_entry(b, &hdev->whitelist, list) {
5732 struct hci_conn *conn;
5733
5734 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5735 if (!conn)
5736 return true;
5737
5738 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5739 return true;
5740 }
5741
5742 return false;
5743}
5744
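/* Recompute the scan enable setting: page scanning is kept on while
 * the device is connectable or while any whitelisted device is
 * disconnected. If the required page scan state already matches the
 * current one, nothing is sent; otherwise a Write Scan Enable command
 * (with inquiry scan also enabled on discoverable devices) is queued
 * on @req, or sent directly when @req is NULL.
 */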
Johan Hedberg432df052014-08-01 11:13:31 +03005745void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5746{
5747 u8 scan;
5748
5749 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5750 return;
5751
5752 if (!hdev_is_powered(hdev))
5753 return;
5754
5755 if (mgmt_powering_down(hdev))
5756 return;
5757
5758 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005759 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005760 scan = SCAN_PAGE;
5761 else
5762 scan = SCAN_DISABLED;
5763
5764 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5765 return;
5766
5767 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5768 scan |= SCAN_INQUIRY;
5769
5770 if (req)
5771 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5772 else
5773 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5774}