/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

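/* Writing 'Y' or 'N' toggles Device Under Test mode on a running
 * controller: HCI_OP_ENABLE_DUT_MODE enters the mode and a plain
 * HCI_OP_RESET is used to leave it again.
 */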
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

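/* Dump the controller feature pages (and the LE feature page when the
 * controller is LE capable) as hex bytes, one page per line.
 */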
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_lesc_support_fops = {
	.open		= simple_open,
	.read		= force_lesc_support_read,
	.write		= force_lesc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

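/* Sniff intervals are specified in baseband slots of 0.625 ms and,
 * per the HCI specification, must be even, which is why odd values
 * are rejected below.
 */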
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

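/* LE connection intervals are in units of 1.25 ms; the valid range
 * 0x0006-0x0c80 corresponds to 7.5 ms up to 4 seconds.
 */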
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

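/* The LE supervision timeout is in units of 10 ms; the valid range
 * 0x000a-0x0c80 corresponds to 100 ms up to 32 seconds.
 */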
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

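/* Advertising intervals are in units of 0.625 ms; the valid range
 * 0x0020-0x4000 corresponds to 20 ms up to 10.24 seconds.
 */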
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

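/* Dump both the BR/EDR whitelist and the stored LE connection
 * parameters, one device per line.
 */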
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

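/* Take the last received event out of hdev->recv_evt and return it if
 * it matches the requested event code, or if it is a Command Complete
 * event for @opcode. On any mismatch the skb is freed and
 * ERR_PTR(-ENODATA) is returned.
 */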
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

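/* Send a single HCI command and sleep until the controller answers
 * with the requested event (or a Command Complete when @event is 0),
 * the timeout expires, or a signal interrupts the wait. Callers are
 * expected to hold hdev->req_lock to serialize requests.
 */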
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

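/* Pick the inquiry mode: 0x02 for extended inquiry results, 0x01 for
 * inquiry results with RSSI, 0x00 for the standard mode. A few
 * controllers are special-cased below, presumably because they
 * support RSSI reporting without advertising it in their feature
 * bits.
 */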
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

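/* Build the event mask from the controller's capabilities: start from
 * a BR/EDR or LE-only default and enable the events that match the
 * supported LMP features before handing the mask to the controller.
 */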
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001554static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556 struct hci_dev *hdev = req->hdev;
1557
Johan Hedberg2177bab2013-03-05 20:37:43 +02001558 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001560 else
1561 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001562
1563 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001564 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001565
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001566 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1567 * local supported commands HCI command.
1568 */
1569 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571
1572 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001573		/* When SSP is available, the host features page
1574		 * should also be available. However, some
1575		 * controllers list max_page as 0 as long as SSP
1576		 * has not been enabled. To achieve proper debugging
1577		 * output, force max_page to a minimum of 1.
1578 */
1579 hdev->max_page = 0x01;
1580
Johan Hedberg2177bab2013-03-05 20:37:43 +02001581 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1582 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001583 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1584 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585 } else {
1586 struct hci_cp_write_eir cp;
1587
1588 memset(hdev->eir, 0, sizeof(hdev->eir));
1589 memset(&cp, 0, sizeof(cp));
1590
Johan Hedberg42c6b122013-03-05 20:37:49 +02001591 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001592 }
1593 }
1594
1595 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001596 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597
1598 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001599 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001600
1601 if (lmp_ext_feat_capable(hdev)) {
1602 struct hci_cp_read_local_ext_features cp;
1603
1604 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001605 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1606 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001607 }
1608
1609 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1610 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001611 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1612 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001613 }
1614}
1615
Johan Hedberg42c6b122013-03-05 20:37:49 +02001616static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001617{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001618 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001619 struct hci_cp_write_def_link_policy cp;
1620 u16 link_policy = 0;
1621
1622 if (lmp_rswitch_capable(hdev))
1623 link_policy |= HCI_LP_RSWITCH;
1624 if (lmp_hold_capable(hdev))
1625 link_policy |= HCI_LP_HOLD;
1626 if (lmp_sniff_capable(hdev))
1627 link_policy |= HCI_LP_SNIFF;
1628 if (lmp_park_capable(hdev))
1629 link_policy |= HCI_LP_PARK;
1630
1631 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001632 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633}
1634
Johan Hedberg42c6b122013-03-05 20:37:49 +02001635static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001636{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001637 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001638 struct hci_cp_write_le_host_supported cp;
1639
Johan Hedbergc73eee92013-04-19 18:35:21 +03001640 /* LE-only devices do not support explicit enablement */
1641 if (!lmp_bredr_capable(hdev))
1642 return;
1643
Johan Hedberg2177bab2013-03-05 20:37:43 +02001644 memset(&cp, 0, sizeof(cp));
1645
1646 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1647 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001648 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001649 }
1650
1651 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001652 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1653 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001654}
1655
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001656static void hci_set_event_mask_page_2(struct hci_request *req)
1657{
1658 struct hci_dev *hdev = req->hdev;
1659 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1660
1661	/* If Connectionless Slave Broadcast master role is supported,
1662 * enable all necessary events for it.
1663 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001664 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001665 events[1] |= 0x40; /* Triggered Clock Capture */
1666 events[1] |= 0x80; /* Synchronization Train Complete */
1667 events[2] |= 0x10; /* Slave Page Response Timeout */
1668 events[2] |= 0x20; /* CSB Channel Map Change */
1669 }
1670
1671	/* If Connectionless Slave Broadcast slave role is supported,
1672 * enable all necessary events for it.
1673 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001674 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001675 events[2] |= 0x01; /* Synchronization Train Received */
1676 events[2] |= 0x02; /* CSB Receive */
1677 events[2] |= 0x04; /* CSB Timeout */
1678 events[2] |= 0x08; /* Truncated Page Complete */
1679 }
1680
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001681 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001682 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001683 events[2] |= 0x80;
1684
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001685 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1686}
1687
Johan Hedberg42c6b122013-03-05 20:37:49 +02001688static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001689{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001690 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001691 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001692
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001693 hci_setup_event_mask(req);
1694
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001695 /* Some Broadcom based Bluetooth controllers do not support the
1696 * Delete Stored Link Key command. They are clearly indicating its
1697 * absence in the bit mask of supported commands.
1698 *
1699	 * Check the supported commands and send the command only if it is
1700	 * marked as supported. If it is not supported, assume that the
1701	 * controller does not actually support stored link keys, which
1702	 * makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001703	 *
1704	 * Some controllers indicate that they support deleting stored
1705	 * link keys, but they don't. The quirk lets a driver just
1706	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001707 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001708 if (hdev->commands[6] & 0x80 &&
1709 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001710 struct hci_cp_delete_stored_link_key cp;
1711
1712 bacpy(&cp.bdaddr, BDADDR_ANY);
1713 cp.delete_all = 0x01;
1714 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715 sizeof(cp), &cp);
1716 }
1717
Johan Hedberg2177bab2013-03-05 20:37:43 +02001718 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001719 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001720
Andre Guedes9193c6e2014-07-01 18:10:09 -03001721 if (lmp_le_capable(hdev)) {
1722 u8 events[8];
1723
1724 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001725 events[0] = 0x0f;
1726
1727 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001729
1730		/* If the controller supports the Connection Parameters Request
1731 * Link Layer Procedure, enable the corresponding event.
1732 */
1733 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734 events[0] |= 0x20; /* LE Remote Connection
1735 * Parameter Request
1736 */
1737
Andre Guedes9193c6e2014-07-01 18:10:09 -03001738 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1739 events);
1740
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001741 if (hdev->commands[25] & 0x40) {
1742 /* Read LE Advertising Channel TX Power */
1743 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1744 }
1745
Johan Hedberg42c6b122013-03-05 20:37:49 +02001746 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001747 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001748
1749 /* Read features beyond page 1 if available */
1750 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1751 struct hci_cp_read_local_ext_features cp;
1752
1753 cp.page = p;
1754 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1755 sizeof(cp), &cp);
1756 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001757}
1758
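/* Illustrative sketch, not part of the original file: the
 * hdev->commands[] tests in hci_init3_req() above and hci_init4_req()
 * below index into the bitmask returned by Read Local Supported
 * Commands, where every HCI command has an assigned octet and bit. A
 * hypothetical helper would make that intent explicit:
 */
static inline bool hci_cmd_supported(struct hci_dev *hdev,
				     unsigned int octet, u8 mask)
{
	return (hdev->commands[octet] & mask) != 0;
}

/* For example, hci_cmd_supported(hdev, 25, 0x40) mirrors the check for
 * LE Read Advertising Channel TX Power in hci_init3_req().
 */
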
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001759static void hci_init4_req(struct hci_request *req, unsigned long opt)
1760{
1761 struct hci_dev *hdev = req->hdev;
1762
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001763 /* Set event mask page 2 if the HCI command for it is supported */
1764 if (hdev->commands[22] & 0x04)
1765 hci_set_event_mask_page_2(req);
1766
Marcel Holtmann109e3192014-07-23 19:24:56 +02001767 /* Read local codec list if the HCI command is supported */
1768 if (hdev->commands[29] & 0x20)
1769 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1770
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001771 /* Get MWS transport configuration if the HCI command is supported */
1772 if (hdev->commands[30] & 0x08)
1773 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1774
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001775 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001776 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001777 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001778
1779 /* Enable Secure Connections if supported and configured */
Johan Hedberg710f11c2014-05-26 11:21:22 +03001780 if (bredr_sc_enabled(hdev)) {
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001781 u8 support = 0x01;
1782 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1783 sizeof(support), &support);
1784 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001785}
1786
Johan Hedberg2177bab2013-03-05 20:37:43 +02001787static int __hci_init(struct hci_dev *hdev)
1788{
1789 int err;
1790
1791 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1792 if (err < 0)
1793 return err;
1794
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001795 /* The Device Under Test (DUT) mode is special and available for
1796 * all controller types. So just create it early on.
1797 */
1798 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1799 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1800 &dut_mode_fops);
1801 }
1802
Johan Hedberg2177bab2013-03-05 20:37:43 +02001803	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1804	 * dual-mode BR/EDR/LE type controllers. AMP controllers only need the
1805 * first stage init.
1806 */
1807 if (hdev->dev_type != HCI_BREDR)
1808 return 0;
1809
1810 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1811 if (err < 0)
1812 return err;
1813
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001814 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1815 if (err < 0)
1816 return err;
1817
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001818 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1819 if (err < 0)
1820 return err;
1821
1822 /* Only create debugfs entries during the initial setup
1823 * phase and not every time the controller gets powered on.
1824 */
1825 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1826 return 0;
1827
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001828 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1829 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001830 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1831 &hdev->manufacturer);
1832 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1833 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001834 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1835 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001836 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1837 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001838 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1839
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001840 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1841 &conn_info_min_age_fops);
1842 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1843 &conn_info_max_age_fops);
1844
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001845 if (lmp_bredr_capable(hdev)) {
1846 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1847 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001848 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1849 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001850 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1851 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001852 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1853 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001854 }
1855
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001856 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001857 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1858 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001859 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1860 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001861 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1862 hdev, &sc_only_mode_fops);
Johan Hedberg858cdc72014-10-16 10:45:31 +02001863 if (lmp_le_capable(hdev))
1864 debugfs_create_file("force_lesc_support", 0644,
1865 hdev->debugfs, hdev,
1866 &force_lesc_support_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001867 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001868
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001869 if (lmp_sniff_capable(hdev)) {
1870 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1871 hdev, &idle_timeout_fops);
1872 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1873 hdev, &sniff_min_interval_fops);
1874 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1875 hdev, &sniff_max_interval_fops);
1876 }
1877
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001878 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001879 debugfs_create_file("identity", 0400, hdev->debugfs,
1880 hdev, &identity_fops);
1881 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1882 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001883 debugfs_create_file("random_address", 0444, hdev->debugfs,
1884 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001885 debugfs_create_file("static_address", 0444, hdev->debugfs,
1886 hdev, &static_address_fops);
1887
1888 /* For controllers with a public address, provide a debug
1889 * option to force the usage of the configured static
1890 * address. By default the public address is used.
1891 */
1892 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1893 debugfs_create_file("force_static_address", 0644,
1894 hdev->debugfs, hdev,
1895 &force_static_address_fops);
1896
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001897 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1898 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001899 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1900 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001901 debugfs_create_file("identity_resolving_keys", 0400,
1902 hdev->debugfs, hdev,
1903 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001904 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1905 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001906 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1907 hdev, &conn_min_interval_fops);
1908 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1909 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001910 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1911 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001912 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1913 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001914 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1915 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001916 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1917 hdev, &adv_min_interval_fops);
1918 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1919 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001920 debugfs_create_u16("discov_interleaved_timeout", 0644,
1921 hdev->debugfs,
1922 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001923
Johan Hedberg711eafe2014-08-08 09:32:52 +03001924 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001925 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001926
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001927 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001928}
1929
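/* Illustrative sketch, an assumption rather than code from this file:
 * the *_fops objects handed to debugfs_create_file() in __hci_init()
 * are typically built from a get/set pair via DEFINE_SIMPLE_ATTRIBUTE.
 * A minimal version for the idle_timeout attribute could look like
 * this (the real setter additionally validates the value range):
 */
static int example_idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

static int example_idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_idle_timeout_fops, example_idle_timeout_get,
			example_idle_timeout_set, "%llu\n");
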
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001930static void hci_init0_req(struct hci_request *req, unsigned long opt)
1931{
1932 struct hci_dev *hdev = req->hdev;
1933
1934 BT_DBG("%s %ld", hdev->name, opt);
1935
1936 /* Reset */
1937 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1938 hci_reset_req(req, 0);
1939
1940 /* Read Local Version */
1941 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1942
1943 /* Read BD Address */
1944 if (hdev->set_bdaddr)
1945 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1946}
1947
1948static int __hci_unconf_init(struct hci_dev *hdev)
1949{
1950 int err;
1951
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001952 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1953 return 0;
1954
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001955 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1956 if (err < 0)
1957 return err;
1958
1959 return 0;
1960}
1961
Johan Hedberg42c6b122013-03-05 20:37:49 +02001962static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
1964 __u8 scan = opt;
1965
Johan Hedberg42c6b122013-03-05 20:37:49 +02001966 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001969 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970}
1971
Johan Hedberg42c6b122013-03-05 20:37:49 +02001972static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
1974 __u8 auth = opt;
1975
Johan Hedberg42c6b122013-03-05 20:37:49 +02001976 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001979 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980}
1981
Johan Hedberg42c6b122013-03-05 20:37:49 +02001982static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983{
1984 __u8 encrypt = opt;
1985
Johan Hedberg42c6b122013-03-05 20:37:49 +02001986 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001988 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001989 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990}
1991
Johan Hedberg42c6b122013-03-05 20:37:49 +02001992static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001993{
1994 __le16 policy = cpu_to_le16(opt);
1995
Johan Hedberg42c6b122013-03-05 20:37:49 +02001996 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001997
1998 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001999 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002000}
2001
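/* Illustrative usage, not part of the original file: the request
 * builders above (hci_scan_req, hci_auth_req, hci_encrypt_req,
 * hci_linkpol_req) are never called directly. They are handed as
 * callbacks to hci_req_sync(), which builds the request, queues it and
 * blocks until the controller answers or the timeout fires, e.g.:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */
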
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002002/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 * Device is held on return. */
2004struct hci_dev *hci_dev_get(int index)
2005{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002006 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
2008 BT_DBG("%d", index);
2009
2010 if (index < 0)
2011 return NULL;
2012
2013 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002014 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 if (d->id == index) {
2016 hdev = hci_dev_hold(d);
2017 break;
2018 }
2019 }
2020 read_unlock(&hci_dev_list_lock);
2021 return hdev;
2022}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
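/* Illustrative usage from a hypothetical caller, not part of the
 * original file: hci_dev_get() returns the device with a reference
 * held, which must be dropped again with hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */
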
2024/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002025
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002026bool hci_discovery_active(struct hci_dev *hdev)
2027{
2028 struct discovery_state *discov = &hdev->discovery;
2029
Andre Guedes6fbe1952012-02-03 17:47:58 -03002030 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002031 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002032 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002033 return true;
2034
Andre Guedes6fbe1952012-02-03 17:47:58 -03002035 default:
2036 return false;
2037 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002038}
2039
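/* Illustrative usage sketch (an assumption about callers, e.g. the mgmt
 * discovery handlers): hci_discovery_active() is checked under the
 * device lock to reject overlapping discovery requests:
 *
 *	hci_dev_lock(hdev);
 *	if (hci_discovery_active(hdev))
 *		err = -EBUSY;
 *	hci_dev_unlock(hdev);
 */
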
Johan Hedbergff9ef572012-01-04 14:23:45 +02002040void hci_discovery_set_state(struct hci_dev *hdev, int state)
2041{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002042 int old_state = hdev->discovery.state;
2043
Johan Hedbergff9ef572012-01-04 14:23:45 +02002044 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2045
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002046 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002047 return;
2048
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002049 hdev->discovery.state = state;
2050
Johan Hedbergff9ef572012-01-04 14:23:45 +02002051 switch (state) {
2052 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002053 hci_update_background_scan(hdev);
2054
Jakub Pawlowski37eab042014-12-05 10:55:55 +01002055 /* Reset RSSI and UUID filters to ensure Start Discovery
2056 * and Start Service Discovery operate properly no matter
2057 * which one started the previous discovery.
2058 *
2059 * While the Start Discovery and Start Service Discovery
2060 * operations will set proper values for RSSI and UUID
2061 * count, it is important to actually free the allocated
2062 * list of UUIDs here.
2063 */
2064 hdev->discovery.rssi = HCI_RSSI_INVALID;
2065 hdev->discovery.uuid_count = 0;
2066 kfree(hdev->discovery.uuids);
2067 hdev->discovery.uuids = NULL;
2068
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002069 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002070 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002071 break;
2072 case DISCOVERY_STARTING:
2073 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002074 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002075 mgmt_discovering(hdev, 1);
2076 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002077 case DISCOVERY_RESOLVING:
2078 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002079 case DISCOVERY_STOPPING:
2080 break;
2081 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002082}
2083
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002084void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085{
Johan Hedberg30883512012-01-04 14:16:21 +02002086 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002087 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
Johan Hedberg561aafb2012-01-04 13:31:59 +02002089 list_for_each_entry_safe(p, n, &cache->all, all) {
2090 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002091 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002093
2094 INIT_LIST_HEAD(&cache->unknown);
2095 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096}
2097
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002098struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2099 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100{
Johan Hedberg30883512012-01-04 14:16:21 +02002101 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 struct inquiry_entry *e;
2103
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002104 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
Johan Hedberg561aafb2012-01-04 13:31:59 +02002106 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002108 return e;
2109 }
2110
2111 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
2113
Johan Hedberg561aafb2012-01-04 13:31:59 +02002114struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002115 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002116{
Johan Hedberg30883512012-01-04 14:16:21 +02002117 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002118 struct inquiry_entry *e;
2119
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002120 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002121
2122 list_for_each_entry(e, &cache->unknown, list) {
2123 if (!bacmp(&e->data.bdaddr, bdaddr))
2124 return e;
2125 }
2126
2127 return NULL;
2128}
2129
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002130struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002131 bdaddr_t *bdaddr,
2132 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002133{
2134 struct discovery_state *cache = &hdev->discovery;
2135 struct inquiry_entry *e;
2136
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002137 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002138
2139 list_for_each_entry(e, &cache->resolve, list) {
2140 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2141 return e;
2142 if (!bacmp(&e->data.bdaddr, bdaddr))
2143 return e;
2144 }
2145
2146 return NULL;
2147}
2148
Johan Hedberga3d4e202012-01-09 00:53:02 +02002149void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002150 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002151{
2152 struct discovery_state *cache = &hdev->discovery;
2153 struct list_head *pos = &cache->resolve;
2154 struct inquiry_entry *p;
2155
2156 list_del(&ie->list);
2157
2158 list_for_each_entry(p, &cache->resolve, list) {
2159 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002160 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002161 break;
2162 pos = &p->list;
2163 }
2164
2165 list_add(&ie->list, pos);
2166}
2167
Marcel Holtmannaf589252014-07-01 14:11:20 +02002168u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2169 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170{
Johan Hedberg30883512012-01-04 14:16:21 +02002171 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002172 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002173 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002175 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
Johan Hedberg6928a922014-10-26 20:46:09 +01002177 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01002178
Marcel Holtmannaf589252014-07-01 14:11:20 +02002179 if (!data->ssp_mode)
2180 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002181
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002182 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002183 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002184 if (!ie->data.ssp_mode)
2185 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002186
Johan Hedberga3d4e202012-01-09 00:53:02 +02002187 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002188 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002189 ie->data.rssi = data->rssi;
2190 hci_inquiry_cache_update_resolve(hdev, ie);
2191 }
2192
Johan Hedberg561aafb2012-01-04 13:31:59 +02002193 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002194 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002195
Johan Hedberg561aafb2012-01-04 13:31:59 +02002196 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002197 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002198 if (!ie) {
2199 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2200 goto done;
2201 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002202
2203 list_add(&ie->all, &cache->all);
2204
2205 if (name_known) {
2206 ie->name_state = NAME_KNOWN;
2207 } else {
2208 ie->name_state = NAME_NOT_KNOWN;
2209 list_add(&ie->list, &cache->unknown);
2210 }
2211
2212update:
2213 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002214 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002215 ie->name_state = NAME_KNOWN;
2216 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 }
2218
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002219 memcpy(&ie->data, data, sizeof(*data));
2220 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002222
2223 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002224 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002225
Marcel Holtmannaf589252014-07-01 14:11:20 +02002226done:
2227 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228}
2229
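/* Illustrative usage sketch (an assumption: this mirrors the inquiry
 * result handlers in hci_event.c, which forward the returned flags to
 * the mgmt Device Found event):
 *
 *	struct inquiry_data data;
 *	u32 flags;
 *
 *	... fill data from an HCI Inquiry Result event ...
 *
 *	hci_dev_lock(hdev);
 *	flags = hci_inquiry_cache_update(hdev, &data, name_known);
 *	hci_dev_unlock(hdev);
 */
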
2230static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2231{
Johan Hedberg30883512012-01-04 14:16:21 +02002232 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 struct inquiry_info *info = (struct inquiry_info *) buf;
2234 struct inquiry_entry *e;
2235 int copied = 0;
2236
Johan Hedberg561aafb2012-01-04 13:31:59 +02002237 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002239
2240 if (copied >= num)
2241 break;
2242
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 bacpy(&info->bdaddr, &data->bdaddr);
2244 info->pscan_rep_mode = data->pscan_rep_mode;
2245 info->pscan_period_mode = data->pscan_period_mode;
2246 info->pscan_mode = data->pscan_mode;
2247 memcpy(info->dev_class, data->dev_class, 3);
2248 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002249
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002251 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 }
2253
2254 BT_DBG("cache %p, copied %d", cache, copied);
2255 return copied;
2256}
2257
Johan Hedberg42c6b122013-03-05 20:37:49 +02002258static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259{
2260 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002261 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 struct hci_cp_inquiry cp;
2263
2264 BT_DBG("%s", hdev->name);
2265
2266 if (test_bit(HCI_INQUIRY, &hdev->flags))
2267 return;
2268
2269 /* Start Inquiry */
2270 memcpy(&cp.lap, &ir->lap, 3);
2271 cp.length = ir->length;
2272 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002273 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274}
2275
2276int hci_inquiry(void __user *arg)
2277{
2278 __u8 __user *ptr = arg;
2279 struct hci_inquiry_req ir;
2280 struct hci_dev *hdev;
2281 int err = 0, do_inquiry = 0, max_rsp;
2282 long timeo;
2283 __u8 *buf;
2284
2285 if (copy_from_user(&ir, ptr, sizeof(ir)))
2286 return -EFAULT;
2287
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002288 hdev = hci_dev_get(ir.dev_id);
2289 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 return -ENODEV;
2291
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002292 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2293 err = -EBUSY;
2294 goto done;
2295 }
2296
Marcel Holtmann4a964402014-07-02 19:10:33 +02002297 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002298 err = -EOPNOTSUPP;
2299 goto done;
2300 }
2301
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002302 if (hdev->dev_type != HCI_BREDR) {
2303 err = -EOPNOTSUPP;
2304 goto done;
2305 }
2306
Johan Hedberg56f87902013-10-02 13:43:13 +03002307 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2308 err = -EOPNOTSUPP;
2309 goto done;
2310 }
2311
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002312 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002313 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002314 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002315 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 do_inquiry = 1;
2317 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002318 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
Marcel Holtmann04837f62006-07-03 10:02:33 +02002320 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002321
2322 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002323 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2324 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002325 if (err < 0)
2326 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002327
2328 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2329 * cleared). If it is interrupted by a signal, return -EINTR.
2330 */
NeilBrown74316202014-07-07 15:16:04 +10002331 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002332 TASK_INTERRUPTIBLE))
2333 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002336	/* For an unlimited number of responses we will use a buffer with
2337 * 255 entries
2338 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2340
2341 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2342 * copy it to the user space.
2343 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002344 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002345 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 err = -ENOMEM;
2347 goto done;
2348 }
2349
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002350 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002352 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
2354 BT_DBG("num_rsp %d", ir.num_rsp);
2355
2356 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2357 ptr += sizeof(ir);
2358 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002359 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002361 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 err = -EFAULT;
2363
2364 kfree(buf);
2365
2366done:
2367 hci_dev_put(hdev);
2368 return err;
2369}
2370
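/* Illustrative userspace sketch, an assumption modelled on the
 * BlueZ-style raw HCI socket users of the HCIINQUIRY ioctl served by
 * hci_inquiry() above (error handling trimmed). Room for the
 * copied-back inquiry_info entries follows the request header in the
 * same buffer:
 *
 *	struct hci_inquiry_req *ir;
 *	uint8_t buf[sizeof(*ir) + 255 * sizeof(struct inquiry_info)];
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ir = (void *) buf;
 *	ir->dev_id  = 0;		   (hci0)
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;		   (GIAC, 0x9e8b33)
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		   (in units of 1.28 seconds)
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) >= 0)
 *		;		   (ir->num_rsp entries follow the header)
 */
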
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002371static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 int ret = 0;
2374
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 BT_DBG("%s %p", hdev->name, hdev);
2376
2377 hci_req_lock(hdev);
2378
Johan Hovold94324962012-03-15 14:48:41 +01002379 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2380 ret = -ENODEV;
2381 goto done;
2382 }
2383
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002384 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2385 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002386 /* Check for rfkill but allow the HCI setup stage to
2387 * proceed (which in itself doesn't cause any RF activity).
2388 */
2389 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2390 ret = -ERFKILL;
2391 goto done;
2392 }
2393
2394		/* Check for a valid public address or a configured static
2395		 * random address, but let the HCI setup proceed to
2396 * be able to determine if there is a public address
2397 * or not.
2398 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002399 * In case of user channel usage, it is not important
2400 * if a public address or static random address is
2401 * available.
2402 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002403 * This check is only valid for BR/EDR controllers
2404 * since AMP controllers do not have an address.
2405 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002406 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2407 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002408 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2409 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2410 ret = -EADDRNOTAVAIL;
2411 goto done;
2412 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002413 }
2414
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 if (test_bit(HCI_UP, &hdev->flags)) {
2416 ret = -EALREADY;
2417 goto done;
2418 }
2419
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 if (hdev->open(hdev)) {
2421 ret = -EIO;
2422 goto done;
2423 }
2424
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002425 atomic_set(&hdev->cmd_cnt, 1);
2426 set_bit(HCI_INIT, &hdev->flags);
2427
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002428 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2429 if (hdev->setup)
2430 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002431
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002432 /* The transport driver can set these quirks before
2433 * creating the HCI device or in its setup callback.
2434 *
2435 * In case any of them is set, the controller has to
2436 * start up as unconfigured.
2437 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002438 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2439 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002440 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002441
2442 /* For an unconfigured controller it is required to
2443 * read at least the version information provided by
2444 * the Read Local Version Information command.
2445 *
2446 * If the set_bdaddr driver callback is provided, then
2447 * also the original Bluetooth public device address
2448 * will be read using the Read BD Address command.
2449 */
2450 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2451 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002452 }
2453
Marcel Holtmann9713c172014-07-06 12:11:15 +02002454 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2455 /* If public address change is configured, ensure that
2456 * the address gets programmed. If the driver does not
2457 * support changing the public address, fail the power
2458 * on procedure.
2459 */
2460 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2461 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002462 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2463 else
2464 ret = -EADDRNOTAVAIL;
2465 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002466
2467 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002468 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002469 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002470 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 }
2472
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002473 clear_bit(HCI_INIT, &hdev->flags);
2474
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 if (!ret) {
2476 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002477 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 set_bit(HCI_UP, &hdev->flags);
2479 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002480 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002481 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002482 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002483 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002484 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002485 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002486 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002487 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002488 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002489 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002491 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002492 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002493 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
2495 skb_queue_purge(&hdev->cmd_q);
2496 skb_queue_purge(&hdev->rx_q);
2497
2498 if (hdev->flush)
2499 hdev->flush(hdev);
2500
2501 if (hdev->sent_cmd) {
2502 kfree_skb(hdev->sent_cmd);
2503 hdev->sent_cmd = NULL;
2504 }
2505
2506 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002507 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 }
2509
2510done:
2511 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 return ret;
2513}
2514
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002515/* ---- HCI ioctl helpers ---- */
2516
2517int hci_dev_open(__u16 dev)
2518{
2519 struct hci_dev *hdev;
2520 int err;
2521
2522 hdev = hci_dev_get(dev);
2523 if (!hdev)
2524 return -ENODEV;
2525
Marcel Holtmann4a964402014-07-02 19:10:33 +02002526 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002527	 * up as a user channel. Trying to bring them up as normal devices
2528	 * will result in a failure. Only user channel operation is
2529 * possible.
2530 *
2531 * When this function is called for a user channel, the flag
2532 * HCI_USER_CHANNEL will be set first before attempting to
2533 * open the device.
2534 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002535 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002536 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2537 err = -EOPNOTSUPP;
2538 goto done;
2539 }
2540
Johan Hedberge1d08f42013-10-01 22:44:50 +03002541 /* We need to ensure that no other power on/off work is pending
2542 * before proceeding to call hci_dev_do_open. This is
2543 * particularly important if the setup procedure has not yet
2544 * completed.
2545 */
2546 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2547 cancel_delayed_work(&hdev->power_off);
2548
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002549 /* After this call it is guaranteed that the setup procedure
2550 * has finished. This means that error conditions like RFKILL
2551 * or no valid public or static random address apply.
2552 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002553 flush_workqueue(hdev->req_workqueue);
2554
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002555	/* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002556	 * are brought up through the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002557 * so that pairing works for them. Once the management interface
2558 * is in use this bit will be cleared again and userspace has
2559 * to explicitly enable it.
2560 */
2561 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2562 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002563 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002564
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002565 err = hci_dev_do_open(hdev);
2566
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002567done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002568 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002569 return err;
2570}
2571
Johan Hedbergd7347f32014-07-04 12:37:23 +03002572/* This function requires the caller holds hdev->lock */
2573static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2574{
2575 struct hci_conn_params *p;
2576
Johan Hedbergf161dd42014-08-15 21:06:54 +03002577 list_for_each_entry(p, &hdev->le_conn_params, list) {
2578 if (p->conn) {
2579 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002580 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002581 p->conn = NULL;
2582 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002583 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002584 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002585
2586 BT_DBG("All LE pending actions cleared");
2587}
2588
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589static int hci_dev_do_close(struct hci_dev *hdev)
2590{
2591 BT_DBG("%s %p", hdev->name, hdev);
2592
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002593 cancel_delayed_work(&hdev->power_off);
2594
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 hci_req_cancel(hdev, ENODEV);
2596 hci_req_lock(hdev);
2597
2598 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002599 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 hci_req_unlock(hdev);
2601 return 0;
2602 }
2603
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002604 /* Flush RX and TX works */
2605 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002606 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002608 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002609 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002610 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002611 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002612 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002613 }
2614
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002615 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002616 cancel_delayed_work(&hdev->service_cache);
2617
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002618 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002619
2620 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2621 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002622
Johan Hedberg76727c02014-11-18 09:00:14 +02002623 /* Avoid potential lockdep warnings from the *_flush() calls by
2624 * ensuring the workqueue is empty up front.
2625 */
2626 drain_workqueue(hdev->workqueue);
2627
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002628 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002629 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002630 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002631 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002632 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
2634 hci_notify(hdev, HCI_DEV_DOWN);
2635
2636 if (hdev->flush)
2637 hdev->flush(hdev);
2638
2639 /* Reset device */
2640 skb_queue_purge(&hdev->cmd_q);
2641 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002642 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2643 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002644 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002646 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 clear_bit(HCI_INIT, &hdev->flags);
2648 }
2649
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002650 /* flush cmd work */
2651 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652
2653 /* Drop queues */
2654 skb_queue_purge(&hdev->rx_q);
2655 skb_queue_purge(&hdev->cmd_q);
2656 skb_queue_purge(&hdev->raw_q);
2657
2658 /* Drop last sent command */
2659 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002660 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661 kfree_skb(hdev->sent_cmd);
2662 hdev->sent_cmd = NULL;
2663 }
2664
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002665 kfree_skb(hdev->recv_evt);
2666 hdev->recv_evt = NULL;
2667
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 /* After this point our queues are empty
2669 * and no tasks are scheduled. */
2670 hdev->close(hdev);
2671
Johan Hedberg35b973c2013-03-15 17:06:59 -05002672 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002673 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002674 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2675
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002676 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2677 if (hdev->dev_type == HCI_BREDR) {
2678 hci_dev_lock(hdev);
2679 mgmt_powered(hdev, 0);
2680 hci_dev_unlock(hdev);
2681 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002682 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002683
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002684 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002685 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002686
Johan Hedberge59fda82012-02-22 18:11:53 +02002687 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002688 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002689 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002690
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 hci_req_unlock(hdev);
2692
2693 hci_dev_put(hdev);
2694 return 0;
2695}
2696
2697int hci_dev_close(__u16 dev)
2698{
2699 struct hci_dev *hdev;
2700 int err;
2701
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002702 hdev = hci_dev_get(dev);
2703 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002705
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002706 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2707 err = -EBUSY;
2708 goto done;
2709 }
2710
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002711 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2712 cancel_delayed_work(&hdev->power_off);
2713
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002715
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002716done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 hci_dev_put(hdev);
2718 return err;
2719}
2720
2721int hci_dev_reset(__u16 dev)
2722{
2723 struct hci_dev *hdev;
2724 int ret = 0;
2725
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002726 hdev = hci_dev_get(dev);
2727 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 return -ENODEV;
2729
2730 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
Marcel Holtmann808a0492013-08-26 20:57:58 -07002732 if (!test_bit(HCI_UP, &hdev->flags)) {
2733 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002735 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002737 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2738 ret = -EBUSY;
2739 goto done;
2740 }
2741
Marcel Holtmann4a964402014-07-02 19:10:33 +02002742 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002743 ret = -EOPNOTSUPP;
2744 goto done;
2745 }
2746
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 /* Drop queues */
2748 skb_queue_purge(&hdev->rx_q);
2749 skb_queue_purge(&hdev->cmd_q);
2750
Johan Hedberg76727c02014-11-18 09:00:14 +02002751 /* Avoid potential lockdep warnings from the *_flush() calls by
2752 * ensuring the workqueue is empty up front.
2753 */
2754 drain_workqueue(hdev->workqueue);
2755
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002756 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002757 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002759 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760
2761 if (hdev->flush)
2762 hdev->flush(hdev);
2763
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002764 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002765 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002767 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
2769done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 hci_req_unlock(hdev);
2771 hci_dev_put(hdev);
2772 return ret;
2773}
2774
2775int hci_dev_reset_stat(__u16 dev)
2776{
2777 struct hci_dev *hdev;
2778 int ret = 0;
2779
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002780 hdev = hci_dev_get(dev);
2781 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 return -ENODEV;
2783
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002784 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2785 ret = -EBUSY;
2786 goto done;
2787 }
2788
Marcel Holtmann4a964402014-07-02 19:10:33 +02002789 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002790 ret = -EOPNOTSUPP;
2791 goto done;
2792 }
2793
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2795
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002796done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 return ret;
2799}
2800
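/* Reflect a raw HCI scan-enable value back into the mgmt-level flags:
 * SCAN_PAGE drives HCI_CONNECTABLE and SCAN_INQUIRY drives
 * HCI_DISCOVERABLE, so a change made behind mgmt's back (e.g. via the
 * HCISETSCAN ioctl below) still reaches mgmt clients through
 * mgmt_new_settings().
 */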
Johan Hedberg123abc02014-07-10 12:09:07 +03002801static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2802{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002803 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002804
2805 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2806
2807	if (scan & SCAN_PAGE)
2808 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2809 &hdev->dev_flags);
2810 else
2811 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2812 &hdev->dev_flags);
2813
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002814	if (scan & SCAN_INQUIRY) {
2815 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2816 &hdev->dev_flags);
2817 } else {
2818 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2819 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2820 &hdev->dev_flags);
2821 }
2822
Johan Hedberg123abc02014-07-10 12:09:07 +03002823 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2824 return;
2825
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002826 if (conn_changed || discov_changed) {
2827 /* In case this was disabled through mgmt */
2828 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2829
2830 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2831 mgmt_update_adv_data(hdev);
2832
Johan Hedberg123abc02014-07-10 12:09:07 +03002833 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002834 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002835}
2836
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837int hci_dev_cmd(unsigned int cmd, void __user *arg)
2838{
2839 struct hci_dev *hdev;
2840 struct hci_dev_req dr;
2841 int err = 0;
2842
2843 if (copy_from_user(&dr, arg, sizeof(dr)))
2844 return -EFAULT;
2845
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002846 hdev = hci_dev_get(dr.dev_id);
2847 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 return -ENODEV;
2849
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002850 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2851 err = -EBUSY;
2852 goto done;
2853 }
2854
Marcel Holtmann4a964402014-07-02 19:10:33 +02002855 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002856 err = -EOPNOTSUPP;
2857 goto done;
2858 }
2859
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002860 if (hdev->dev_type != HCI_BREDR) {
2861 err = -EOPNOTSUPP;
2862 goto done;
2863 }
2864
Johan Hedberg56f87902013-10-02 13:43:13 +03002865 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2866 err = -EOPNOTSUPP;
2867 goto done;
2868 }
2869
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 switch (cmd) {
2871 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002872 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2873 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 break;
2875
2876 case HCISETENCRYPT:
2877 if (!lmp_encrypt_capable(hdev)) {
2878 err = -EOPNOTSUPP;
2879 break;
2880 }
2881
2882 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2883 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002884 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2885 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 if (err)
2887 break;
2888 }
2889
Johan Hedberg01178cd2013-03-05 20:37:41 +02002890 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2891 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 break;
2893
2894 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002895 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2896 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002897
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002898 /* Ensure that the connectable and discoverable states
2899 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002900 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002901 if (!err)
2902 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903 break;
2904
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002905 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002906 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2907 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002908 break;
2909
2910 case HCISETLINKMODE:
2911 hdev->link_mode = ((__u16) dr.dev_opt) &
2912 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2913 break;
2914
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 case HCISETPTYPE:
2916 hdev->pkt_type = (__u16) dr.dev_opt;
2917 break;
2918
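	/* For the two MTU ioctls below, dev_opt packs a 16-bit MTU and a
	 * 16-bit packet count into one 32-bit value, and the pointer
	 * arithmetic splits it in host byte order. A (hypothetical)
	 * little-endian caller would therefore pack it as:
	 *
	 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
	 */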
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002920 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2921 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 break;
2923
2924 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002925 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2926 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 break;
2928
2929 default:
2930 err = -EINVAL;
2931 break;
2932 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002933
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002934done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 hci_dev_put(hdev);
2936 return err;
2937}
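
/* Illustrative sketch (not part of the original file): driving the
 * HCISETSCAN case above from user space. SCAN_PAGE | SCAN_INQUIRY
 * makes the adapter both connectable and discoverable, and on success
 * hci_update_scan_state() brings the mgmt flags in sync:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,		// assumed adapter index
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(fd, HCISETSCAN, &dr) < 0)	// fd: raw HCI socket
 *		perror("HCISETSCAN");
 */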
2938
2939int hci_get_dev_list(void __user *arg)
2940{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002941 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942 struct hci_dev_list_req *dl;
2943 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 int n = 0, size, err;
2945 __u16 dev_num;
2946
2947 if (get_user(dev_num, (__u16 __user *) arg))
2948 return -EFAULT;
2949
2950 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2951 return -EINVAL;
2952
2953 size = sizeof(*dl) + dev_num * sizeof(*dr);
2954
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002955 dl = kzalloc(size, GFP_KERNEL);
2956 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 return -ENOMEM;
2958
2959 dr = dl->dev_req;
2960
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002961 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002962 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002963 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002964
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002965		/* When auto-off is configured, the transport is
2966		 * running, but in that case still indicate that the
2967		 * device is actually down.
2968 */
2969 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2970 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002971
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002973 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002974
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 if (++n >= dev_num)
2976 break;
2977 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002978 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
2980 dl->dev_num = n;
2981 size = sizeof(*dl) + n * sizeof(*dr);
2982
2983 err = copy_to_user(arg, dl, size);
2984 kfree(dl);
2985
2986 return err ? -EFAULT : 0;
2987}
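
/* Illustrative sketch (not part of the original file): a user-space
 * caller sizes the buffer for at most dev_num entries, sets dev_num up
 * front and reads the actual count back:
 *
 *	int max = 16;		// assumed upper bound
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + max * sizeof(struct hci_dev_req));
 *	dl->dev_num = max;
 *
 *	if (ioctl(fd, HCIGETDEVLIST, dl) == 0)
 *		printf("%u adapters\n", dl->dev_num);
 */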
2988
2989int hci_get_dev_info(void __user *arg)
2990{
2991 struct hci_dev *hdev;
2992 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002993 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 int err = 0;
2995
2996 if (copy_from_user(&di, arg, sizeof(di)))
2997 return -EFAULT;
2998
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002999 hdev = hci_dev_get(di.dev_id);
3000 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 return -ENODEV;
3002
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003003	/* When auto-off is configured, the transport is
3004	 * running, but in that case still indicate that the
3005	 * device is actually down.
3006 */
3007 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3008 flags = hdev->flags & ~BIT(HCI_UP);
3009 else
3010 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02003011
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 strcpy(di.name, hdev->name);
3013 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07003014 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003015 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03003017 if (lmp_bredr_capable(hdev)) {
3018 di.acl_mtu = hdev->acl_mtu;
3019 di.acl_pkts = hdev->acl_pkts;
3020 di.sco_mtu = hdev->sco_mtu;
3021 di.sco_pkts = hdev->sco_pkts;
3022 } else {
3023 di.acl_mtu = hdev->le_mtu;
3024 di.acl_pkts = hdev->le_pkts;
3025 di.sco_mtu = 0;
3026 di.sco_pkts = 0;
3027 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 di.link_policy = hdev->link_policy;
3029 di.link_mode = hdev->link_mode;
3030
3031 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3032 memcpy(&di.features, &hdev->features, sizeof(di.features));
3033
3034 if (copy_to_user(arg, &di, sizeof(di)))
3035 err = -EFAULT;
3036
3037 hci_dev_put(hdev);
3038
3039 return err;
3040}
3041
3042/* ---- Interface to HCI drivers ---- */
3043
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003044static int hci_rfkill_set_block(void *data, bool blocked)
3045{
3046 struct hci_dev *hdev = data;
3047
3048 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3049
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003050 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3051 return -EBUSY;
3052
Johan Hedberg5e130362013-09-13 08:58:17 +03003053 if (blocked) {
3054 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003055 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3056 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003057 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003058 } else {
3059 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003060 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003061
3062 return 0;
3063}
3064
3065static const struct rfkill_ops hci_rfkill_ops = {
3066 .set_block = hci_rfkill_set_block,
3067};
3068
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003069static void hci_power_on(struct work_struct *work)
3070{
3071 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003072 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003073
3074 BT_DBG("%s", hdev->name);
3075
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003076 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003077 if (err < 0) {
3078 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003079 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003080 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003081
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003082 /* During the HCI setup phase, a few error conditions are
3083 * ignored and they need to be checked now. If they are still
3084 * valid, it is important to turn the device back off.
3085 */
3086 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003087 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003088 (hdev->dev_type == HCI_BREDR &&
3089 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3090 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003091 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3092 hci_dev_do_close(hdev);
3093 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003094 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3095 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003096 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003097
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003098 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003099 /* For unconfigured devices, set the HCI_RAW flag
3100 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003101 */
3102 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3103 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003104
3105 /* For fully configured devices, this will send
3106 * the Index Added event. For unconfigured devices,
3107		 * it will send an Unconfigured Index Added event.
3108		 *
3109		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3110		 * and no event will be sent.
3111 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003112 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003113 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003114		/* Now that the controller is configured, it is
3115		 * important to clear the HCI_RAW flag.
3116 */
3117 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3118 clear_bit(HCI_RAW, &hdev->flags);
3119
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003120 /* Powering on the controller with HCI_CONFIG set only
3121 * happens with the transition from unconfigured to
3122 * configured. This will send the Index Added event.
3123 */
3124 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003125 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003126}
3127
3128static void hci_power_off(struct work_struct *work)
3129{
Johan Hedberg32435532011-11-07 22:16:04 +02003130 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003131 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003132
3133 BT_DBG("%s", hdev->name);
3134
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003135 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003136}
3137
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003138static void hci_discov_off(struct work_struct *work)
3139{
3140 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003141
3142 hdev = container_of(work, struct hci_dev, discov_off.work);
3143
3144 BT_DBG("%s", hdev->name);
3145
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003146 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003147}
3148
Johan Hedberg35f74982014-02-18 17:14:32 +02003149void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003150{
Johan Hedberg48210022013-01-27 00:31:28 +02003151 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003152
Johan Hedberg48210022013-01-27 00:31:28 +02003153 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3154 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003155 kfree(uuid);
3156 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003157}
3158
Johan Hedberg35f74982014-02-18 17:14:32 +02003159void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003160{
Johan Hedberg0378b592014-11-19 15:22:22 +02003161 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003162
Johan Hedberg0378b592014-11-19 15:22:22 +02003163 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3164 list_del_rcu(&key->list);
3165 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003166 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003167}
3168
Johan Hedberg35f74982014-02-18 17:14:32 +02003169void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003170{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003171 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003172
Johan Hedberg970d0f12014-11-13 14:37:47 +02003173 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3174 list_del_rcu(&k->list);
3175 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003176 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003177}
3178
Johan Hedberg970c4e42014-02-18 10:19:33 +02003179void hci_smp_irks_clear(struct hci_dev *hdev)
3180{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003181 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003182
Johan Hedbergadae20c2014-11-13 14:37:48 +02003183 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3184 list_del_rcu(&k->list);
3185 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003186 }
3187}
3188
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003189struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3190{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003191 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003192
Johan Hedberg0378b592014-11-19 15:22:22 +02003193 rcu_read_lock();
3194 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3195 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3196 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003197 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003198 }
3199 }
3200 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003201
3202 return NULL;
3203}
3204
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303205static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003206 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003207{
3208 /* Legacy key */
3209 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303210 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003211
3212 /* Debug keys are insecure so don't store them persistently */
3213 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303214 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003215
3216 /* Changed combination key and there's no previous one */
3217 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303218 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003219
3220 /* Security mode 3 case */
3221 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303222 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003223
Johan Hedberge3befab2014-06-01 16:33:39 +03003224 /* BR/EDR key derived using SC from an LE link */
3225 if (conn->type == LE_LINK)
3226 return true;
3227
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003228	/* Neither the local nor the remote side requested no-bonding */
3229 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303230 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003231
3232 /* Local side had dedicated bonding as requirement */
3233 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303234 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003235
3236 /* Remote side had dedicated bonding as requirement */
3237 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303238 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003239
3240 /* If none of the above criteria match, then don't store the key
3241 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303242 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003243}
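
/* Concrete outcomes of the policy above: a legacy key (type below
 * 0x03) is always stored, a debug combination key never is, and a key
 * from a pairing in which both sides advertised no-bonding (auth
 * 0x00/0x01) without dedicated bonding falls through to the
 * non-persistent case.
 */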
3244
Johan Hedberge804d252014-07-16 11:42:28 +03003245static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003246{
Johan Hedberge804d252014-07-16 11:42:28 +03003247 if (type == SMP_LTK)
3248 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003249
Johan Hedberge804d252014-07-16 11:42:28 +03003250 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003251}
3252
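/* Look up an LTK for the given identity address. LTKs from legacy SMP
 * pairing are role-specific (master vs. slave key), whereas LE Secure
 * Connections produces a single symmetric key, which is why keys that
 * pass smp_ltk_is_sc() match regardless of the requested role.
 */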
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003253struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003255{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003256 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003257
Johan Hedberg970d0f12014-11-13 14:37:47 +02003258 rcu_read_lock();
3259 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003260 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3261 continue;
3262
Johan Hedberg923e2412014-12-03 12:43:39 +02003263 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003264 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003265 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003266 }
3267 }
3268 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003269
3270 return NULL;
3271}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003272
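/* Resolve a Resolvable Private Address to a known IRK. The first pass
 * only compares against RPAs that were resolved and cached earlier;
 * the second pass runs the (more expensive) cryptographic check in
 * smp_irk_matches() and caches a hit in irk->rpa for next time.
 */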
Johan Hedberg970c4e42014-02-18 10:19:33 +02003273struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3274{
3275 struct smp_irk *irk;
3276
Johan Hedbergadae20c2014-11-13 14:37:48 +02003277 rcu_read_lock();
3278 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3279 if (!bacmp(&irk->rpa, rpa)) {
3280 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003281 return irk;
3282 }
3283 }
3284
Johan Hedbergadae20c2014-11-13 14:37:48 +02003285 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3286 if (smp_irk_matches(hdev, irk->val, rpa)) {
3287 bacpy(&irk->rpa, rpa);
3288 rcu_read_unlock();
3289 return irk;
3290 }
3291 }
3292 rcu_read_unlock();
3293
Johan Hedberg970c4e42014-02-18 10:19:33 +02003294 return NULL;
3295}
3296
3297struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3298 u8 addr_type)
3299{
3300 struct smp_irk *irk;
3301
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003302 /* Identity Address must be public or static random */
3303 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3304 return NULL;
3305
Johan Hedbergadae20c2014-11-13 14:37:48 +02003306 rcu_read_lock();
3307 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003308 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003309 bacmp(bdaddr, &irk->bdaddr) == 0) {
3310 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003311 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003312 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003313 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003314 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003315
3316 return NULL;
3317}
3318
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003319struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003320 bdaddr_t *bdaddr, u8 *val, u8 type,
3321 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003322{
3323 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303324 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003325
3326 old_key = hci_find_link_key(hdev, bdaddr);
3327 if (old_key) {
3328 old_key_type = old_key->type;
3329 key = old_key;
3330 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003331 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003332 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003333 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003334 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02003335 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003336 }
3337
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003338 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003339
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003340 /* Some buggy controller combinations generate a changed
3341 * combination key for legacy pairing even when there's no
3342 * previous key */
3343 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003344 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003345 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003346 if (conn)
3347 conn->key_type = type;
3348 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003349
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003350 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003351 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003352 key->pin_len = pin_len;
3353
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003354 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003355 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003356 else
3357 key->type = type;
3358
Johan Hedberg7652ff62014-06-24 13:15:49 +03003359 if (persistent)
3360 *persistent = hci_persistent_key(hdev, conn, type,
3361 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003362
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003363 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003364}
3365
Johan Hedbergca9142b2014-02-19 14:57:44 +02003366struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003367 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003368 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003369{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003370 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003371 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003372
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003373 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003374 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003375 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003376 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003377 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003378 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003379 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003380 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003381 }
3382
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003383 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003384 key->bdaddr_type = addr_type;
3385 memcpy(key->val, tk, sizeof(key->val));
3386 key->authenticated = authenticated;
3387 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003388 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003389 key->enc_size = enc_size;
3390 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003391
Johan Hedbergca9142b2014-02-19 14:57:44 +02003392 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003393}
3394
Johan Hedbergca9142b2014-02-19 14:57:44 +02003395struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3396 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003397{
3398 struct smp_irk *irk;
3399
3400 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3401 if (!irk) {
3402 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3403 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003404 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003405
3406 bacpy(&irk->bdaddr, bdaddr);
3407 irk->addr_type = addr_type;
3408
Johan Hedbergadae20c2014-11-13 14:37:48 +02003409 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003410 }
3411
3412 memcpy(irk->val, val, 16);
3413 bacpy(&irk->rpa, rpa);
3414
Johan Hedbergca9142b2014-02-19 14:57:44 +02003415 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003416}
3417
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003418int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3419{
3420 struct link_key *key;
3421
3422 key = hci_find_link_key(hdev, bdaddr);
3423 if (!key)
3424 return -ENOENT;
3425
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003426 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003427
Johan Hedberg0378b592014-11-19 15:22:22 +02003428 list_del_rcu(&key->list);
3429 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003430
3431 return 0;
3432}
3433
Johan Hedberge0b2b272014-02-18 17:14:31 +02003434int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003435{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003436 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003437 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003438
Johan Hedberg970d0f12014-11-13 14:37:47 +02003439 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003440 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003441 continue;
3442
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003443 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003444
Johan Hedberg970d0f12014-11-13 14:37:47 +02003445 list_del_rcu(&k->list);
3446 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003447 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003448 }
3449
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003450 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003451}
3452
Johan Hedberga7ec7332014-02-18 17:14:35 +02003453void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3454{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003455 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003456
Johan Hedbergadae20c2014-11-13 14:37:48 +02003457 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003458 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3459 continue;
3460
3461 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3462
Johan Hedbergadae20c2014-11-13 14:37:48 +02003463 list_del_rcu(&k->list);
3464 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003465 }
3466}
3467
Ville Tervo6bd32322011-02-16 16:32:41 +02003468/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003469static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003470{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003471 struct hci_dev *hdev = container_of(work, struct hci_dev,
3472 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003473
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003474 if (hdev->sent_cmd) {
3475 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3476 u16 opcode = __le16_to_cpu(sent->opcode);
3477
3478 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3479 } else {
3480 BT_ERR("%s command tx timeout", hdev->name);
3481 }
3482
Ville Tervo6bd32322011-02-16 16:32:41 +02003483 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003484 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003485}
3486
Szymon Janc2763eda2011-03-22 13:12:22 +01003487struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003488 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003489{
3490 struct oob_data *data;
3491
Johan Hedberg6928a922014-10-26 20:46:09 +01003492 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3493 if (bacmp(bdaddr, &data->bdaddr) != 0)
3494 continue;
3495 if (data->bdaddr_type != bdaddr_type)
3496 continue;
3497 return data;
3498 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003499
3500 return NULL;
3501}
3502
Johan Hedberg6928a922014-10-26 20:46:09 +01003503int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3504 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003505{
3506 struct oob_data *data;
3507
Johan Hedberg6928a922014-10-26 20:46:09 +01003508 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003509 if (!data)
3510 return -ENOENT;
3511
Johan Hedberg6928a922014-10-26 20:46:09 +01003512 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003513
3514 list_del(&data->list);
3515 kfree(data);
3516
3517 return 0;
3518}
3519
Johan Hedberg35f74982014-02-18 17:14:32 +02003520void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003521{
3522 struct oob_data *data, *n;
3523
3524 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3525 list_del(&data->list);
3526 kfree(data);
3527 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003528}
3529
Marcel Holtmann07988722014-01-10 02:07:29 -08003530int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01003531 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003532 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01003533{
3534 struct oob_data *data;
3535
Johan Hedberg6928a922014-10-26 20:46:09 +01003536 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003537 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003538 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003539 if (!data)
3540 return -ENOMEM;
3541
3542 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01003543 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01003544 list_add(&data->list, &hdev->remote_oob_data);
3545 }
3546
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003547 if (hash192 && rand192) {
3548 memcpy(data->hash192, hash192, sizeof(data->hash192));
3549 memcpy(data->rand192, rand192, sizeof(data->rand192));
3550 } else {
3551 memset(data->hash192, 0, sizeof(data->hash192));
3552 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003553 }
3554
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003555 if (hash256 && rand256) {
3556 memcpy(data->hash256, hash256, sizeof(data->hash256));
3557 memcpy(data->rand256, rand256, sizeof(data->rand256));
3558 } else {
3559 memset(data->hash256, 0, sizeof(data->hash256));
3560 memset(data->rand256, 0, sizeof(data->rand256));
3561 }
Marcel Holtmann07988722014-01-10 02:07:29 -08003562
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003563 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003564
3565 return 0;
3566}
3567
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003568struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003569 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003570{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003571 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003572
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003573 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003574 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003575 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003576 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003577
3578 return NULL;
3579}
3580
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003581void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003582{
3583 struct list_head *p, *n;
3584
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003585 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003586 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003587
3588 list_del(p);
3589 kfree(b);
3590 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003591}
3592
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003593int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003594{
3595 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003596
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003597 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003598 return -EBADF;
3599
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003600 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003601 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003602
Johan Hedberg27f70f32014-07-21 10:50:06 +03003603 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003604 if (!entry)
3605 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003606
3607 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003608 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003609
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003610 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003611
3612 return 0;
3613}
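
/* Minimal usage sketch (assumed caller context; hdev->lock held as in
 * the surrounding mgmt code). The address bytes are stored LSB first,
 * so this adds 00:11:22:33:44:55 to the BR/EDR whitelist:
 *
 *	bdaddr_t peer = { .b = { 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 } };
 *	int err = hci_bdaddr_list_add(&hdev->whitelist, &peer,
 *				      BDADDR_BREDR);
 *
 * -EEXIST reports a duplicate and -EBADF rejects BDADDR_ANY.
 */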
3614
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003615int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003616{
3617 struct bdaddr_list *entry;
3618
Johan Hedberg35f74982014-02-18 17:14:32 +02003619 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003620 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003621 return 0;
3622 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003623
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003624 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003625 if (!entry)
3626 return -ENOENT;
3627
3628 list_del(&entry->list);
3629 kfree(entry);
3630
3631 return 0;
3632}
3633
Andre Guedes15819a72014-02-03 13:56:18 -03003634/* This function requires the caller holds hdev->lock */
3635struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3636 bdaddr_t *addr, u8 addr_type)
3637{
3638 struct hci_conn_params *params;
3639
Johan Hedberg738f6182014-07-03 19:33:51 +03003640 /* The conn params list only contains identity addresses */
3641 if (!hci_is_identity_address(addr, addr_type))
3642 return NULL;
3643
Andre Guedes15819a72014-02-03 13:56:18 -03003644 list_for_each_entry(params, &hdev->le_conn_params, list) {
3645 if (bacmp(&params->addr, addr) == 0 &&
3646 params->addr_type == addr_type) {
3647 return params;
3648 }
3649 }
3650
3651 return NULL;
3652}
3653
Andre Guedescef952c2014-02-26 20:21:49 -03003654static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3655{
3656 struct hci_conn *conn;
3657
3658 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3659 if (!conn)
3660 return false;
3661
3662 if (conn->dst_type != type)
3663 return false;
3664
3665 if (conn->state != BT_CONNECTED)
3666 return false;
3667
3668 return true;
3669}
3670
Andre Guedes15819a72014-02-03 13:56:18 -03003671/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003672struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3673 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003674{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003675 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003676
Johan Hedberg738f6182014-07-03 19:33:51 +03003677 /* The list only contains identity addresses */
3678 if (!hci_is_identity_address(addr, addr_type))
3679 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003680
Johan Hedberg501f8822014-07-04 12:37:26 +03003681 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003682 if (bacmp(&param->addr, addr) == 0 &&
3683 param->addr_type == addr_type)
3684 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003685 }
3686
3687 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003688}
3689
3690/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003691struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3692 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003693{
3694 struct hci_conn_params *params;
3695
Johan Hedbergc46245b2014-07-02 17:37:33 +03003696 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003697 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003698
Andre Guedes15819a72014-02-03 13:56:18 -03003699 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003700 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003701 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003702
3703 params = kzalloc(sizeof(*params), GFP_KERNEL);
3704 if (!params) {
3705 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003706 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003707 }
3708
3709 bacpy(&params->addr, addr);
3710 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003711
3712 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003713 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003714
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003715 params->conn_min_interval = hdev->le_conn_min_interval;
3716 params->conn_max_interval = hdev->le_conn_max_interval;
3717 params->conn_latency = hdev->le_conn_latency;
3718 params->supervision_timeout = hdev->le_supv_timeout;
3719 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3720
3721 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3722
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003723 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003724}
3725
3726/* This function requires the caller holds hdev->lock */
3727int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003728 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003729{
3730 struct hci_conn_params *params;
3731
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003732 params = hci_conn_params_add(hdev, addr, addr_type);
3733 if (!params)
3734 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003735
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003736 if (params->auto_connect == auto_connect)
3737 return 0;
3738
Johan Hedberg95305ba2014-07-04 12:37:21 +03003739 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003740
Andre Guedescef952c2014-02-26 20:21:49 -03003741 switch (auto_connect) {
3742 case HCI_AUTO_CONN_DISABLED:
3743 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003744 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003745 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003746 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003747 list_add(&params->action, &hdev->pend_le_reports);
3748 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003749 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003750 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003751 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003752 if (!is_connected(hdev, addr, addr_type)) {
3753 list_add(&params->action, &hdev->pend_le_conns);
3754 hci_update_background_scan(hdev);
3755 }
Andre Guedescef952c2014-02-26 20:21:49 -03003756 break;
3757 }
Andre Guedes15819a72014-02-03 13:56:18 -03003758
Johan Hedberg851efca2014-07-02 22:42:00 +03003759 params->auto_connect = auto_connect;
3760
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003761 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3762 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003763
3764 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003765}
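
/* Usage sketch (assumptions: caller holds hdev->lock and already has
 * the peer's identity address). Marking an LE device for background
 * auto-connection moves its params onto hdev->pend_le_conns and
 * refreshes the passive scan:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */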
3766
Johan Hedbergf6c63242014-08-15 21:06:59 +03003767static void hci_conn_params_free(struct hci_conn_params *params)
3768{
3769 if (params->conn) {
3770 hci_conn_drop(params->conn);
3771 hci_conn_put(params->conn);
3772 }
3773
3774 list_del(&params->action);
3775 list_del(&params->list);
3776 kfree(params);
3777}
3778
Andre Guedes15819a72014-02-03 13:56:18 -03003779/* This function requires the caller holds hdev->lock */
3780void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3781{
3782 struct hci_conn_params *params;
3783
3784 params = hci_conn_params_lookup(hdev, addr, addr_type);
3785 if (!params)
3786 return;
3787
Johan Hedbergf6c63242014-08-15 21:06:59 +03003788 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003789
Johan Hedberg95305ba2014-07-04 12:37:21 +03003790 hci_update_background_scan(hdev);
3791
Andre Guedes15819a72014-02-03 13:56:18 -03003792 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3793}
3794
3795/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003796void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003797{
3798 struct hci_conn_params *params, *tmp;
3799
3800 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003801 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3802 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003803 list_del(&params->list);
3804 kfree(params);
3805 }
3806
Johan Hedberg55af49a82014-07-02 17:37:26 +03003807 BT_DBG("All LE disabled connection parameters were removed");
3808}
3809
3810/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003811void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003812{
3813 struct hci_conn_params *params, *tmp;
3814
Johan Hedbergf6c63242014-08-15 21:06:59 +03003815 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3816 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003817
Johan Hedberga2f41a82014-07-04 12:37:19 +03003818 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003819
Andre Guedes15819a72014-02-03 13:56:18 -03003820 BT_DBG("All LE connection parameters were removed");
3821}
3822
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003823static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003824{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003825 if (status) {
3826 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003827
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003828 hci_dev_lock(hdev);
3829 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3830 hci_dev_unlock(hdev);
3831 return;
3832 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003833}
3834
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003835static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003836{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003837 /* General inquiry access code (GIAC) */
3838 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3839 struct hci_request req;
3840 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003841 int err;
3842
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003843 if (status) {
3844 BT_ERR("Failed to disable LE scanning: status %d", status);
3845 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003846 }
3847
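	/* For LE-only discovery stopping the LE scan is the end of the
	 * cycle; for interleaved discovery the BR/EDR inquiry phase is
	 * chained here, reusing the general inquiry access code above.
	 */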
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003848 switch (hdev->discovery.type) {
3849 case DISCOV_TYPE_LE:
3850 hci_dev_lock(hdev);
3851 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3852 hci_dev_unlock(hdev);
3853 break;
3854
3855 case DISCOV_TYPE_INTERLEAVED:
3856 hci_req_init(&req, hdev);
3857
3858 memset(&cp, 0, sizeof(cp));
3859 memcpy(&cp.lap, lap, sizeof(cp.lap));
3860 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3861 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3862
3863 hci_dev_lock(hdev);
3864
3865 hci_inquiry_cache_flush(hdev);
3866
3867 err = hci_req_run(&req, inquiry_complete);
3868 if (err) {
3869 BT_ERR("Inquiry request failed: err %d", err);
3870 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3871 }
3872
3873 hci_dev_unlock(hdev);
3874 break;
3875 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003876}
3877
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003878static void le_scan_disable_work(struct work_struct *work)
3879{
3880 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003881 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003882 struct hci_request req;
3883 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003884
3885 BT_DBG("%s", hdev->name);
3886
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003887 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003888
Andre Guedesb1efcc22014-02-26 20:21:40 -03003889 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003890
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003891 err = hci_req_run(&req, le_scan_disable_work_complete);
3892 if (err)
3893 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003894}
3895
Johan Hedberg8d972502014-02-28 12:54:14 +02003896static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3897{
3898 struct hci_dev *hdev = req->hdev;
3899
3900 /* If we're advertising or initiating an LE connection we can't
3901 * go ahead and change the random address at this time. This is
3902 * because the eventual initiator address used for the
3903 * subsequently created connection will be undefined (some
3904 * controllers use the new address and others the one we had
3905 * when the operation started).
3906 *
3907 * In this kind of scenario skip the update and let the random
3908 * address be updated at the next cycle.
3909 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003910 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003911 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3912 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003913 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003914 return;
3915 }
3916
3917 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3918}
3919
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003920int hci_update_random_address(struct hci_request *req, bool require_privacy,
3921 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003922{
3923 struct hci_dev *hdev = req->hdev;
3924 int err;
3925
3926	/* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003927	 * the current RPA has expired or something other than the
3928	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003929 */
3930 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003931 int to;
3932
3933 *own_addr_type = ADDR_LE_DEV_RANDOM;
3934
3935 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003936 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003937 return 0;
3938
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003939 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003940 if (err < 0) {
3941 BT_ERR("%s failed to generate new RPA", hdev->name);
3942 return err;
3943 }
3944
Johan Hedberg8d972502014-02-28 12:54:14 +02003945 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003946
3947 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3948 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3949
3950 return 0;
3951 }
3952
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003953	/* If privacy is required but no resolvable private address is
3954	 * available, use a non-resolvable private address. This is useful
3955	 * for active scanning and non-connectable advertising.
3956 */
3957 if (require_privacy) {
3958 bdaddr_t urpa;
3959
3960 get_random_bytes(&urpa, 6);
3961 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3962
3963 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003964 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003965 return 0;
3966 }
3967
Johan Hedbergebd3a742014-02-23 19:42:21 +02003968	/* If forcing static address is in use or there is no public
3969	 * address, use the static address as the random address (but skip
3970	 * the HCI command if the current random address is already the
3971	 * static one).
3972 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003973 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003974 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3975 *own_addr_type = ADDR_LE_DEV_RANDOM;
3976 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3977 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3978 &hdev->static_addr);
3979 return 0;
3980 }
3981
3982 /* Neither privacy nor static address is being used so use a
3983 * public address.
3984 */
3985 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3986
3987 return 0;
3988}
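
/* Caller-side sketch (hypothetical request construction; the real
 * callers live in the advertising and scanning setup paths):
 *
 *	struct hci_request req;
 *	u8 own_addr_type;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	err = hci_update_random_address(&req, false, &own_addr_type);
 *	if (!err)
 *		queue_adv_params(&req, own_addr_type); // hypothetical helper
 */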
3989
Johan Hedberga1f4c312014-02-27 14:05:41 +02003990/* Copy the Identity Address of the controller.
3991 *
3992 * If the controller has a public BD_ADDR, then by default use that one.
3993 * If this is a LE only controller without a public address, default to
3994 * the static random address.
3995 *
3996 * For debugging purposes it is possible to force controllers with a
3997 * public address to use the static random address instead.
3998 */
3999void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4000 u8 *bdaddr_type)
4001{
Marcel Holtmann111902f2014-06-21 04:53:17 +02004002 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02004003 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4004 bacpy(bdaddr, &hdev->static_addr);
4005 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4006 } else {
4007 bacpy(bdaddr, &hdev->bdaddr);
4008 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4009 }
4010}
4011
David Herrmann9be0dab2012-04-22 14:39:57 +02004012/* Alloc HCI device */
4013struct hci_dev *hci_alloc_dev(void)
4014{
4015 struct hci_dev *hdev;
4016
Johan Hedberg27f70f32014-07-21 10:50:06 +03004017 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02004018 if (!hdev)
4019 return NULL;
4020
David Herrmannb1b813d2012-04-22 14:39:58 +02004021 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4022 hdev->esco_type = (ESCO_HV1);
4023 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07004024 hdev->num_iac = 0x01; /* One IAC support is mandatory */
4025 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02004026 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01004027 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4028 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02004029
David Herrmannb1b813d2012-04-22 14:39:58 +02004030 hdev->sniff_max_interval = 800;
4031 hdev->sniff_min_interval = 80;
4032
Marcel Holtmann3f959d42014-02-20 11:55:56 -08004033 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02004034 hdev->le_adv_min_interval = 0x0800;
4035 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004036 hdev->le_scan_interval = 0x0060;
4037 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004038 hdev->le_conn_min_interval = 0x0028;
4039 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004040 hdev->le_conn_latency = 0x0000;
4041 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004042
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004043 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004044 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004045 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4046 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004047
David Herrmannb1b813d2012-04-22 14:39:58 +02004048 mutex_init(&hdev->lock);
4049 mutex_init(&hdev->req_lock);
4050
4051 INIT_LIST_HEAD(&hdev->mgmt_pending);
4052 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004053 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004054 INIT_LIST_HEAD(&hdev->uuids);
4055 INIT_LIST_HEAD(&hdev->link_keys);
4056 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004057 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004058 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004059 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004060 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004061 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004062 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004063 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004064
4065 INIT_WORK(&hdev->rx_work, hci_rx_work);
4066 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4067 INIT_WORK(&hdev->tx_work, hci_tx_work);
4068 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004069
David Herrmannb1b813d2012-04-22 14:39:58 +02004070 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4071 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4072 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4073
David Herrmannb1b813d2012-04-22 14:39:58 +02004074 skb_queue_head_init(&hdev->rx_q);
4075 skb_queue_head_init(&hdev->cmd_q);
4076 skb_queue_head_init(&hdev->raw_q);
4077
4078 init_waitqueue_head(&hdev->req_wait_q);
4079
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004080 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004081
David Herrmannb1b813d2012-04-22 14:39:58 +02004082 hci_init_sysfs(hdev);
4083 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004084
4085 return hdev;
4086}
4087EXPORT_SYMBOL(hci_alloc_dev);
4088
4089/* Free HCI device */
4090void hci_free_dev(struct hci_dev *hdev)
4091{
David Herrmann9be0dab2012-04-22 14:39:57 +02004092 /* will free via device release */
4093 put_device(&hdev->dev);
4094}
4095EXPORT_SYMBOL(hci_free_dev);
4096
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097/* Register HCI device */
4098int hci_register_dev(struct hci_dev *hdev)
4099{
David Herrmannb1b813d2012-04-22 14:39:58 +02004100 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101
Marcel Holtmann74292d52014-07-06 15:50:27 +02004102 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 return -EINVAL;
4104
Mat Martineau08add512011-11-02 16:18:36 -07004105 /* Do not allow HCI_AMP devices to register at index 0,
4106 * so the index can be used as the AMP controller ID.
4107 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004108 switch (hdev->dev_type) {
4109 case HCI_BREDR:
4110 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4111 break;
4112 case HCI_AMP:
4113 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4114 break;
4115 default:
4116 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004118
Sasha Levin3df92b32012-05-27 22:36:56 +02004119 if (id < 0)
4120 return id;
4121
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122 sprintf(hdev->name, "hci%d", id);
4123 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004124
4125 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4126
Kees Cookd8537542013-07-03 15:04:57 -07004127 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4128 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004129 if (!hdev->workqueue) {
4130 error = -ENOMEM;
4131 goto err;
4132 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004133
Kees Cookd8537542013-07-03 15:04:57 -07004134 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4135 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004136 if (!hdev->req_workqueue) {
4137 destroy_workqueue(hdev->workqueue);
4138 error = -ENOMEM;
4139 goto err;
4140 }
4141
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004142 if (!IS_ERR_OR_NULL(bt_debugfs))
4143 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4144
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004145 dev_set_name(&hdev->dev, "%s", hdev->name);
4146
4147 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004148 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004149 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004151 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004152 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4153 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004154 if (hdev->rfkill) {
4155 if (rfkill_register(hdev->rfkill) < 0) {
4156 rfkill_destroy(hdev->rfkill);
4157 hdev->rfkill = NULL;
4158 }
4159 }
4160
Johan Hedberg5e130362013-09-13 08:58:17 +03004161 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4162 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4163
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004164 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004165 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004166
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004167 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004168 /* Assume BR/EDR support until proven otherwise (such as
 4169 * through reading supported features during init).
4170 */
4171 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4172 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004173
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004174 write_lock(&hci_dev_list_lock);
4175 list_add(&hdev->list, &hci_dev_list);
4176 write_unlock(&hci_dev_list_lock);
4177
Marcel Holtmann4a964402014-07-02 19:10:33 +02004178 /* Devices that are marked for raw-only usage are unconfigured
4179 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004180 */
4181 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004182 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004183
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004185 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186
Johan Hedberg19202572013-01-14 22:33:51 +02004187 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004188
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004190
David Herrmann33ca9542011-10-08 14:58:49 +02004191err_wqueue:
4192 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004193 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004194err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004195 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004196
David Herrmann33ca9542011-10-08 14:58:49 +02004197 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198}
4199EXPORT_SYMBOL(hci_register_dev);
4200
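/* Illustrative sketch (not part of the original file): the typical
 * allocate/register sequence a transport driver follows using the
 * helpers above. The example_* callbacks and the HCI_USB bus type are
 * hypothetical placeholders for a real driver's transport hooks.
 */
#if 0
static int example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	/* hci_register_dev() rejects devices lacking these callbacks */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	/* On removal: hci_unregister_dev(hdev); hci_free_dev(hdev); */
	return 0;
}
#endif
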
4201/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004202void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203{
Sasha Levin3df92b32012-05-27 22:36:56 +02004204 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004205
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004206 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207
Johan Hovold94324962012-03-15 14:48:41 +01004208 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4209
Sasha Levin3df92b32012-05-27 22:36:56 +02004210 id = hdev->id;
4211
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004212 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004214 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215
4216 hci_dev_do_close(hdev);
4217
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304218 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004219 kfree_skb(hdev->reassembly[i]);
4220
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004221 cancel_work_sync(&hdev->power_on);
4222
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004223 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004224 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4225 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004226 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004227 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004228 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004229 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004230
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004231 /* mgmt_index_removed should take care of emptying the
4232 * pending list */
4233 BUG_ON(!list_empty(&hdev->mgmt_pending));
4234
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 hci_notify(hdev, HCI_DEV_UNREG);
4236
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004237 if (hdev->rfkill) {
4238 rfkill_unregister(hdev->rfkill);
4239 rfkill_destroy(hdev->rfkill);
4240 }
4241
Johan Hedberg711eafe2014-08-08 09:32:52 +03004242 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004243
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004244 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004245
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004246 debugfs_remove_recursive(hdev->debugfs);
4247
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004248 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004249 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004250
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004251 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004252 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004253 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004254 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004255 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004256 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004257 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004258 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004259 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004260 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004261 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004262
David Herrmanndc946bd2012-01-07 15:47:24 +01004263 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004264
4265 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266}
4267EXPORT_SYMBOL(hci_unregister_dev);
4268
4269/* Suspend HCI device */
4270int hci_suspend_dev(struct hci_dev *hdev)
4271{
4272 hci_notify(hdev, HCI_DEV_SUSPEND);
4273 return 0;
4274}
4275EXPORT_SYMBOL(hci_suspend_dev);
4276
4277/* Resume HCI device */
4278int hci_resume_dev(struct hci_dev *hdev)
4279{
4280 hci_notify(hdev, HCI_DEV_RESUME);
4281 return 0;
4282}
4283EXPORT_SYMBOL(hci_resume_dev);
4284
Marcel Holtmann75e05692014-11-02 08:15:38 +01004285/* Reset HCI device */
4286int hci_reset_dev(struct hci_dev *hdev)
4287{
4288 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4289 struct sk_buff *skb;
4290
4291 skb = bt_skb_alloc(3, GFP_ATOMIC);
4292 if (!skb)
4293 return -ENOMEM;
4294
4295 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4296 memcpy(skb_put(skb, 3), hw_err, 3);
4297
4298 /* Send Hardware Error to upper stack */
4299 return hci_recv_frame(hdev, skb);
4300}
4301EXPORT_SYMBOL(hci_reset_dev);
4302
Marcel Holtmann76bca882009-11-18 00:40:39 +01004303/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004304int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004305{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004306 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004307 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004308 kfree_skb(skb);
4309 return -ENXIO;
4310 }
4311
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004312 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004313 bt_cb(skb)->incoming = 1;
4314
4315 /* Time stamp */
4316 __net_timestamp(skb);
4317
Marcel Holtmann76bca882009-11-18 00:40:39 +01004318 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004319 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004320
Marcel Holtmann76bca882009-11-18 00:40:39 +01004321 return 0;
4322}
4323EXPORT_SYMBOL(hci_recv_frame);
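
/* Illustrative sketch (not part of the original file): how a driver's
 * receive path hands a complete frame to the core. The buffer contents
 * and the choice of event packet type are hypothetical.
 */
#if 0
static int example_driver_recv(struct hci_dev *hdev, const void *buf,
			       int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	/* hci_recv_frame() takes ownership of skb in all cases */
	return hci_recv_frame(hdev, skb);
}
#endif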
4324
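/* Reassemble one packet of the given type from a raw byte stream. Bytes
 * accumulate in hdev->reassembly[index] until the length declared in
 * the packet header is complete; the finished frame is then passed to
 * hci_recv_frame(). Returns the number of unconsumed bytes, or a
 * negative error.
 */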
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304325static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004326 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304327{
4328 int len = 0;
4329 int hlen = 0;
4330 int remain = count;
4331 struct sk_buff *skb;
4332 struct bt_skb_cb *scb;
4333
4334 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004335 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304336 return -EILSEQ;
4337
4338 skb = hdev->reassembly[index];
4339
4340 if (!skb) {
4341 switch (type) {
4342 case HCI_ACLDATA_PKT:
4343 len = HCI_MAX_FRAME_SIZE;
4344 hlen = HCI_ACL_HDR_SIZE;
4345 break;
4346 case HCI_EVENT_PKT:
4347 len = HCI_MAX_EVENT_SIZE;
4348 hlen = HCI_EVENT_HDR_SIZE;
4349 break;
4350 case HCI_SCODATA_PKT:
4351 len = HCI_MAX_SCO_SIZE;
4352 hlen = HCI_SCO_HDR_SIZE;
4353 break;
4354 }
4355
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004356 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304357 if (!skb)
4358 return -ENOMEM;
4359
4360 scb = (void *) skb->cb;
4361 scb->expect = hlen;
4362 scb->pkt_type = type;
4363
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304364 hdev->reassembly[index] = skb;
4365 }
4366
4367 while (count) {
4368 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004369 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304370
4371 memcpy(skb_put(skb, len), data, len);
4372
4373 count -= len;
4374 data += len;
4375 scb->expect -= len;
4376 remain = count;
4377
4378 switch (type) {
4379 case HCI_EVENT_PKT:
4380 if (skb->len == HCI_EVENT_HDR_SIZE) {
4381 struct hci_event_hdr *h = hci_event_hdr(skb);
4382 scb->expect = h->plen;
4383
4384 if (skb_tailroom(skb) < scb->expect) {
4385 kfree_skb(skb);
4386 hdev->reassembly[index] = NULL;
4387 return -ENOMEM;
4388 }
4389 }
4390 break;
4391
4392 case HCI_ACLDATA_PKT:
4393 if (skb->len == HCI_ACL_HDR_SIZE) {
4394 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4395 scb->expect = __le16_to_cpu(h->dlen);
4396
4397 if (skb_tailroom(skb) < scb->expect) {
4398 kfree_skb(skb);
4399 hdev->reassembly[index] = NULL;
4400 return -ENOMEM;
4401 }
4402 }
4403 break;
4404
4405 case HCI_SCODATA_PKT:
4406 if (skb->len == HCI_SCO_HDR_SIZE) {
4407 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4408 scb->expect = h->dlen;
4409
4410 if (skb_tailroom(skb) < scb->expect) {
4411 kfree_skb(skb);
4412 hdev->reassembly[index] = NULL;
4413 return -ENOMEM;
4414 }
4415 }
4416 break;
4417 }
4418
4419 if (scb->expect == 0) {
4420 /* Complete frame */
4421
4422 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004423 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304424
4425 hdev->reassembly[index] = NULL;
4426 return remain;
4427 }
4428 }
4429
4430 return remain;
4431}
4432
Suraj Sumangala99811512010-07-14 13:02:19 +05304433#define STREAM_REASSEMBLY 0
4434
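/* Feed a raw byte stream (a packet type byte followed by the packet
 * itself) from a UART-style transport into the reassembly machinery,
 * one packet at a time, until all bytes are consumed.
 */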
4435int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4436{
4437 int type;
4438 int rem = 0;
4439
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004440 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304441 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4442
4443 if (!skb) {
4444 struct { char type; } *pkt;
4445
4446 /* Start of the frame */
4447 pkt = data;
4448 type = pkt->type;
4449
4450 data++;
4451 count--;
4452 } else
4453 type = bt_cb(skb)->pkt_type;
4454
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004455 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004456 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304457 if (rem < 0)
4458 return rem;
4459
4460 data += (count - rem);
4461 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004462 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304463
4464 return rem;
4465}
4466EXPORT_SYMBOL(hci_recv_stream_fragment);
4467
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468/* ---- Interface to upper protocols ---- */
4469
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470int hci_register_cb(struct hci_cb *cb)
4471{
4472 BT_DBG("%p name %s", cb, cb->name);
4473
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004474 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004476 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477
4478 return 0;
4479}
4480EXPORT_SYMBOL(hci_register_cb);
4481
4482int hci_unregister_cb(struct hci_cb *cb)
4483{
4484 BT_DBG("%p name %s", cb, cb->name);
4485
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004486 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004488 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489
4490 return 0;
4491}
4492EXPORT_SYMBOL(hci_unregister_cb);
4493
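/* Hand one outgoing frame to the driver, time-stamping it and sending
 * copies to the monitor and, in promiscuous mode, to the sockets.
 */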
Marcel Holtmann51086992013-10-10 14:54:19 -07004494static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004496 int err;
4497
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004498 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004500 /* Time stamp */
4501 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004503 /* Send copy to monitor */
4504 hci_send_to_monitor(hdev, skb);
4505
4506 if (atomic_read(&hdev->promisc)) {
4507 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004508 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 }
4510
4511 /* Get rid of skb owner, prior to sending to the driver. */
4512 skb_orphan(skb);
4513
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004514 err = hdev->send(hdev, skb);
4515 if (err < 0) {
4516 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4517 kfree_skb(skb);
4518 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519}
4520
Johan Hedberg3119ae92013-03-05 20:37:44 +02004521void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4522{
4523 skb_queue_head_init(&req->cmd_q);
4524 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004525 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004526}
4527
4528int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4529{
4530 struct hci_dev *hdev = req->hdev;
4531 struct sk_buff *skb;
4532 unsigned long flags;
4533
4534 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4535
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004536 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004537 * commands queued on the HCI request queue.
4538 */
4539 if (req->err) {
4540 skb_queue_purge(&req->cmd_q);
4541 return req->err;
4542 }
4543
Johan Hedberg3119ae92013-03-05 20:37:44 +02004544 /* Do not allow empty requests */
4545 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004546 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004547
4548 skb = skb_peek_tail(&req->cmd_q);
4549 bt_cb(skb)->req.complete = complete;
4550
4551 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4552 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4553 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4554
4555 queue_work(hdev->workqueue, &hdev->cmd_work);
4556
4557 return 0;
4558}
4559
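/* Illustrative sketch (not part of the original file): building and
 * running an asynchronous HCI request with the helpers above. The
 * completion callback and the address argument are hypothetical.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request status 0x%2.2x", hdev->name, status);
}

static int example_set_random_addr(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct hci_request req;

	hci_req_init(&req, hdev);

	/* Queueing errors accumulate in req.err and are reported once
	 * by hci_req_run().
	 */
	hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);

	return hci_req_run(&req, example_req_complete);
}
#endif
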
Marcel Holtmann899de762014-07-11 05:51:58 +02004560bool hci_req_pending(struct hci_dev *hdev)
4561{
4562 return (hdev->req_status == HCI_REQ_PEND);
4563}
4564
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004565static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004566 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567{
4568 int len = HCI_COMMAND_HDR_SIZE + plen;
4569 struct hci_command_hdr *hdr;
4570 struct sk_buff *skb;
4571
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004573 if (!skb)
4574 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575
4576 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004577 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578 hdr->plen = plen;
4579
4580 if (plen)
4581 memcpy(skb_put(skb, plen), param, plen);
4582
4583 BT_DBG("skb len %d", skb->len);
4584
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004585 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004586 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004587
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004588 return skb;
4589}
4590
4591/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004592int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4593 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004594{
4595 struct sk_buff *skb;
4596
4597 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4598
4599 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4600 if (!skb) {
4601 BT_ERR("%s no memory for command", hdev->name);
4602 return -ENOMEM;
4603 }
4604
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004605 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004606 * single-command requests.
4607 */
4608 bt_cb(skb)->req.start = true;
4609
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004611 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612
4613 return 0;
4614}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
Johan Hedberg71c76a12013-03-05 20:37:46 +02004616/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004617void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4618 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004619{
4620 struct hci_dev *hdev = req->hdev;
4621 struct sk_buff *skb;
4622
4623 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4624
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004625 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004626 * queueing the HCI command. We can simply return.
4627 */
4628 if (req->err)
4629 return;
4630
Johan Hedberg71c76a12013-03-05 20:37:46 +02004631 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4632 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004633 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4634 hdev->name, opcode);
4635 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004636 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004637 }
4638
4639 if (skb_queue_empty(&req->cmd_q))
4640 bt_cb(skb)->req.start = true;
4641
Johan Hedberg02350a72013-04-03 21:50:29 +03004642 bt_cb(skb)->req.event = event;
4643
Johan Hedberg71c76a12013-03-05 20:37:46 +02004644 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004645}
4646
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004647void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4648 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004649{
4650 hci_req_add_ev(req, opcode, plen, param, 0);
4651}
4652
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004654void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655{
4656 struct hci_command_hdr *hdr;
4657
4658 if (!hdev->sent_cmd)
4659 return NULL;
4660
4661 hdr = (void *) hdev->sent_cmd->data;
4662
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004663 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 return NULL;
4665
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004666 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667
4668 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4669}
4670
4671/* Send ACL data */
4672static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4673{
4674 struct hci_acl_hdr *hdr;
4675 int len = skb->len;
4676
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004677 skb_push(skb, HCI_ACL_HDR_SIZE);
4678 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004679 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004680 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4681 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682}
4683
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004684static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004685 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004687 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688 struct hci_dev *hdev = conn->hdev;
4689 struct sk_buff *list;
4690
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004691 skb->len = skb_headlen(skb);
4692 skb->data_len = 0;
4693
4694 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004695
4696 switch (hdev->dev_type) {
4697 case HCI_BREDR:
4698 hci_add_acl_hdr(skb, conn->handle, flags);
4699 break;
4700 case HCI_AMP:
4701 hci_add_acl_hdr(skb, chan->handle, flags);
4702 break;
4703 default:
4704 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4705 return;
4706 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004707
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004708 list = skb_shinfo(skb)->frag_list;
4709 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004710 /* Non-fragmented */
4711 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4712
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004713 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714 } else {
4715 /* Fragmented */
4716 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4717
4718 skb_shinfo(skb)->frag_list = NULL;
4719
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004720 /* Queue all fragments atomically. We need to use spin_lock_bh
 4721 * here because of 6LoWPAN links, where this function can be
 4722 * called from softirq context and taking a normal spin lock
 4723 * could deadlock.
4724 */
4725 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004727 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004728
4729 flags &= ~ACL_START;
4730 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731 do {
4732 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004733
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004734 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004735 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736
4737 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4738
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004739 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 } while (list);
4741
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004742 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004744}
4745
4746void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4747{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004748 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004749
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004750 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004751
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004752 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004754 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756
4757/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004758void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759{
4760 struct hci_dev *hdev = conn->hdev;
4761 struct hci_sco_hdr hdr;
4762
4763 BT_DBG("%s len %d", hdev->name, skb->len);
4764
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004765 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766 hdr.dlen = skb->len;
4767
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004768 skb_push(skb, HCI_SCO_HDR_SIZE);
4769 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004770 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004772 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004773
Linus Torvalds1da177e2005-04-16 15:20:36 -07004774 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004775 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004776}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777
4778/* ---- HCI TX task (outgoing data) ---- */
4779
4780/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004781static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4782 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783{
4784 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004785 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004786 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004787
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004788 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004790
4791 rcu_read_lock();
4792
4793 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004794 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004796
4797 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4798 continue;
4799
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800 num++;
4801
4802 if (c->sent < min) {
4803 min = c->sent;
4804 conn = c;
4805 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004806
4807 if (hci_conn_num(hdev, type) == num)
4808 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 }
4810
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004811 rcu_read_unlock();
4812
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004814 int cnt, q;
4815
4816 switch (conn->type) {
4817 case ACL_LINK:
4818 cnt = hdev->acl_cnt;
4819 break;
4820 case SCO_LINK:
4821 case ESCO_LINK:
4822 cnt = hdev->sco_cnt;
4823 break;
4824 case LE_LINK:
4825 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4826 break;
4827 default:
4828 cnt = 0;
4829 BT_ERR("Unknown link type");
4830 }
4831
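		/* Split the free buffer credits evenly between the
		 * connections that have data queued; every connection
		 * gets a quote of at least one packet.
		 */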
4832 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833 *quote = q ? q : 1;
4834 } else
4835 *quote = 0;
4836
4837 BT_DBG("conn %p quote %d", conn, *quote);
4838 return conn;
4839}
4840
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004841static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842{
4843 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004844 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845
Ville Tervobae1f5d92011-02-10 22:38:53 -03004846 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004848 rcu_read_lock();
4849
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004851 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004852 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004853 BT_ERR("%s killing stalled connection %pMR",
4854 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004855 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856 }
4857 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004858
4859 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860}
4861
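/* Pick the channel to service next for the given link type: among the
 * channels whose queued skbs have the highest priority, choose the one
 * on the connection with the fewest outstanding packets, and compute
 * its packet quote from the controller's free buffer count.
 */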
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004862static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4863 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004864{
4865 struct hci_conn_hash *h = &hdev->conn_hash;
4866 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004867 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004868 struct hci_conn *conn;
4869 int cnt, q, conn_num = 0;
4870
4871 BT_DBG("%s", hdev->name);
4872
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004873 rcu_read_lock();
4874
4875 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004876 struct hci_chan *tmp;
4877
4878 if (conn->type != type)
4879 continue;
4880
4881 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4882 continue;
4883
4884 conn_num++;
4885
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004886 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004887 struct sk_buff *skb;
4888
4889 if (skb_queue_empty(&tmp->data_q))
4890 continue;
4891
4892 skb = skb_peek(&tmp->data_q);
4893 if (skb->priority < cur_prio)
4894 continue;
4895
4896 if (skb->priority > cur_prio) {
4897 num = 0;
4898 min = ~0;
4899 cur_prio = skb->priority;
4900 }
4901
4902 num++;
4903
4904 if (conn->sent < min) {
4905 min = conn->sent;
4906 chan = tmp;
4907 }
4908 }
4909
4910 if (hci_conn_num(hdev, type) == conn_num)
4911 break;
4912 }
4913
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004914 rcu_read_unlock();
4915
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004916 if (!chan)
4917 return NULL;
4918
4919 switch (chan->conn->type) {
4920 case ACL_LINK:
4921 cnt = hdev->acl_cnt;
4922 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004923 case AMP_LINK:
4924 cnt = hdev->block_cnt;
4925 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004926 case SCO_LINK:
4927 case ESCO_LINK:
4928 cnt = hdev->sco_cnt;
4929 break;
4930 case LE_LINK:
4931 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4932 break;
4933 default:
4934 cnt = 0;
4935 BT_ERR("Unknown link type");
4936 }
4937
4938 q = cnt / num;
4939 *quote = q ? q : 1;
4940 BT_DBG("chan %p quote %d", chan, *quote);
4941 return chan;
4942}
4943
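/* Promote starved channels: after a scheduling round, raise the
 * priority of queued skbs on channels that sent nothing, so lower
 * priority traffic is not starved indefinitely.
 */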
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004944static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4945{
4946 struct hci_conn_hash *h = &hdev->conn_hash;
4947 struct hci_conn *conn;
4948 int num = 0;
4949
4950 BT_DBG("%s", hdev->name);
4951
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004952 rcu_read_lock();
4953
4954 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004955 struct hci_chan *chan;
4956
4957 if (conn->type != type)
4958 continue;
4959
4960 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4961 continue;
4962
4963 num++;
4964
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004965 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004966 struct sk_buff *skb;
4967
4968 if (chan->sent) {
4969 chan->sent = 0;
4970 continue;
4971 }
4972
4973 if (skb_queue_empty(&chan->data_q))
4974 continue;
4975
4976 skb = skb_peek(&chan->data_q);
4977 if (skb->priority >= HCI_PRIO_MAX - 1)
4978 continue;
4979
4980 skb->priority = HCI_PRIO_MAX - 1;
4981
4982 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004983 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004984 }
4985
4986 if (hci_conn_num(hdev, type) == num)
4987 break;
4988 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004989
4990 rcu_read_unlock();
4991
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004992}
4993
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004994static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4995{
4996 /* Calculate count of blocks used by this packet */
4997 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4998}
4999
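/* Kill stalled connections if the controller has had no free ACL
 * buffers for longer than the ACL tx timeout.
 */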
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005000static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001{
Marcel Holtmann4a964402014-07-02 19:10:33 +02005002 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 /* ACL tx timeout must be longer than the maximum
5004 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005005 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005006 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005007 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005009}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005011static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005012{
5013 unsigned int cnt = hdev->acl_cnt;
5014 struct hci_chan *chan;
5015 struct sk_buff *skb;
5016 int quote;
5017
5018 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005019
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005020 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005021 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005022 u32 priority = (skb_peek(&chan->data_q))->priority;
5023 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005024 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005025 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005026
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005027 /* Stop if priority has changed */
5028 if (skb->priority < priority)
5029 break;
5030
5031 skb = skb_dequeue(&chan->data_q);
5032
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005033 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03005034 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005035
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005036 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037 hdev->acl_last_tx = jiffies;
5038
5039 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005040 chan->sent++;
5041 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042 }
5043 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005044
5045 if (cnt != hdev->acl_cnt)
5046 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047}
5048
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005049static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005050{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005051 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005052 struct hci_chan *chan;
5053 struct sk_buff *skb;
5054 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005055 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005056
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005057 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005058
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005059 BT_DBG("%s", hdev->name);
5060
5061 if (hdev->dev_type == HCI_AMP)
5062 type = AMP_LINK;
5063 else
5064 type = ACL_LINK;
5065
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005066 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005067 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005068 u32 priority = (skb_peek(&chan->data_q))->priority;
5069 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5070 int blocks;
5071
5072 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005073 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005074
5075 /* Stop if priority has changed */
5076 if (skb->priority < priority)
5077 break;
5078
5079 skb = skb_dequeue(&chan->data_q);
5080
5081 blocks = __get_blocks(hdev, skb);
5082 if (blocks > hdev->block_cnt)
5083 return;
5084
5085 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005086 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005087
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005088 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005089 hdev->acl_last_tx = jiffies;
5090
5091 hdev->block_cnt -= blocks;
5092 quote -= blocks;
5093
5094 chan->sent += blocks;
5095 chan->conn->sent += blocks;
5096 }
5097 }
5098
5099 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005100 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005101}
5102
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005103static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005104{
5105 BT_DBG("%s", hdev->name);
5106
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005107 /* No ACL link over BR/EDR controller */
5108 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5109 return;
5110
5111 /* No AMP link over AMP controller */
5112 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005113 return;
5114
5115 switch (hdev->flow_ctl_mode) {
5116 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5117 hci_sched_acl_pkt(hdev);
5118 break;
5119
5120 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5121 hci_sched_acl_blk(hdev);
5122 break;
5123 }
5124}
5125
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005127static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128{
5129 struct hci_conn *conn;
5130 struct sk_buff *skb;
5131 int quote;
5132
5133 BT_DBG("%s", hdev->name);
5134
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005135 if (!hci_conn_num(hdev, SCO_LINK))
5136 return;
5137
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5139 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5140 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005141 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142
5143 conn->sent++;
5144 if (conn->sent == ~0)
5145 conn->sent = 0;
5146 }
5147 }
5148}
5149
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005150static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005151{
5152 struct hci_conn *conn;
5153 struct sk_buff *skb;
5154 int quote;
5155
5156 BT_DBG("%s", hdev->name);
5157
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005158 if (!hci_conn_num(hdev, ESCO_LINK))
5159 return;
5160
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005161 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5162 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005163 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5164 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005165 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005166
5167 conn->sent++;
5168 if (conn->sent == ~0)
5169 conn->sent = 0;
5170 }
5171 }
5172}
5173
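/* Schedule LE traffic. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffer credits instead.
 */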
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005174static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005175{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005176 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005177 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005178 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005179
5180 BT_DBG("%s", hdev->name);
5181
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005182 if (!hci_conn_num(hdev, LE_LINK))
5183 return;
5184
Marcel Holtmann4a964402014-07-02 19:10:33 +02005185 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005186 /* LE tx timeout must be longer than the maximum
5187 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005188 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005189 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005190 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005191 }
5192
5193 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005194 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005195 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005196 u32 priority = (skb_peek(&chan->data_q))->priority;
5197 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005198 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005199 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005200
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005201 /* Stop if priority has changed */
5202 if (skb->priority < priority)
5203 break;
5204
5205 skb = skb_dequeue(&chan->data_q);
5206
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005207 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005208 hdev->le_last_tx = jiffies;
5209
5210 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005211 chan->sent++;
5212 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005213 }
5214 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005215
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005216 if (hdev->le_pkts)
5217 hdev->le_cnt = cnt;
5218 else
5219 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005220
5221 if (cnt != tmp)
5222 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005223}
5224
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005225static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005227 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 struct sk_buff *skb;
5229
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005230 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005231 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232
Marcel Holtmann52de5992013-09-03 18:08:38 -07005233 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5234 /* Schedule queues and send stuff to HCI driver */
5235 hci_sched_acl(hdev);
5236 hci_sched_sco(hdev);
5237 hci_sched_esco(hdev);
5238 hci_sched_le(hdev);
5239 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005240
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 /* Send next queued raw (unknown type) packet */
5242 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005243 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244}
5245
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005246/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247
5248/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005249static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250{
5251 struct hci_acl_hdr *hdr = (void *) skb->data;
5252 struct hci_conn *conn;
5253 __u16 handle, flags;
5254
5255 skb_pull(skb, HCI_ACL_HDR_SIZE);
5256
5257 handle = __le16_to_cpu(hdr->handle);
5258 flags = hci_flags(handle);
5259 handle = hci_handle(handle);
5260
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005261 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005262 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263
5264 hdev->stat.acl_rx++;
5265
5266 hci_dev_lock(hdev);
5267 conn = hci_conn_hash_lookup_handle(hdev, handle);
5268 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005269
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005271 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005272
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005274 l2cap_recv_acldata(conn, skb, flags);
5275 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005277 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005278 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279 }
5280
5281 kfree_skb(skb);
5282}
5283
5284/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005285static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286{
5287 struct hci_sco_hdr *hdr = (void *) skb->data;
5288 struct hci_conn *conn;
5289 __u16 handle;
5290
5291 skb_pull(skb, HCI_SCO_HDR_SIZE);
5292
5293 handle = __le16_to_cpu(hdr->handle);
5294
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005295 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296
5297 hdev->stat.sco_rx++;
5298
5299 hci_dev_lock(hdev);
5300 conn = hci_conn_hash_lookup_handle(hdev, handle);
5301 hci_dev_unlock(hdev);
5302
5303 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005305 sco_recv_scodata(conn, skb);
5306 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005308 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005309 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310 }
5311
5312 kfree_skb(skb);
5313}
5314
Johan Hedberg9238f362013-03-05 20:37:48 +02005315static bool hci_req_is_complete(struct hci_dev *hdev)
5316{
5317 struct sk_buff *skb;
5318
5319 skb = skb_peek(&hdev->cmd_q);
5320 if (!skb)
5321 return true;
5322
5323 return bt_cb(skb)->req.start;
5324}
5325
Johan Hedberg42c6b122013-03-05 20:37:49 +02005326static void hci_resend_last(struct hci_dev *hdev)
5327{
5328 struct hci_command_hdr *sent;
5329 struct sk_buff *skb;
5330 u16 opcode;
5331
5332 if (!hdev->sent_cmd)
5333 return;
5334
5335 sent = (void *) hdev->sent_cmd->data;
5336 opcode = __le16_to_cpu(sent->opcode);
5337 if (opcode == HCI_OP_RESET)
5338 return;
5339
5340 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5341 if (!skb)
5342 return;
5343
5344 skb_queue_head(&hdev->cmd_q, skb);
5345 queue_work(hdev->workqueue, &hdev->cmd_work);
5346}
5347
Johan Hedberg9238f362013-03-05 20:37:48 +02005348void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5349{
5350 hci_req_complete_t req_complete = NULL;
5351 struct sk_buff *skb;
5352 unsigned long flags;
5353
5354 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5355
Johan Hedberg42c6b122013-03-05 20:37:49 +02005356 /* If the completed command doesn't match the last one that was
 5357 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005358 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005359 if (!hci_sent_cmd_data(hdev, opcode)) {
5360 /* Some CSR based controllers generate a spontaneous
5361 * reset complete event during init and any pending
5362 * command will never be completed. In such a case we
5363 * need to resend whatever was the last sent
5364 * command.
5365 */
5366 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5367 hci_resend_last(hdev);
5368
Johan Hedberg9238f362013-03-05 20:37:48 +02005369 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005370 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005371
 5372 /* If the command succeeded and there are still more commands in
 5373 * this request, the request is not yet complete.
5374 */
5375 if (!status && !hci_req_is_complete(hdev))
5376 return;
5377
5378 /* If this was the last command in a request the complete
5379 * callback would be found in hdev->sent_cmd instead of the
5380 * command queue (hdev->cmd_q).
5381 */
5382 if (hdev->sent_cmd) {
5383 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005384
5385 if (req_complete) {
5386 /* We must set the complete callback to NULL to
5387 * avoid calling the callback more than once if
5388 * this function gets called again.
5389 */
5390 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5391
Johan Hedberg9238f362013-03-05 20:37:48 +02005392 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005393 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005394 }
5395
5396 /* Remove all pending commands belonging to this request */
5397 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5398 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5399 if (bt_cb(skb)->req.start) {
5400 __skb_queue_head(&hdev->cmd_q, skb);
5401 break;
5402 }
5403
5404 req_complete = bt_cb(skb)->req.complete;
5405 kfree_skb(skb);
5406 }
5407 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5408
5409call_complete:
5410 if (req_complete)
5411 req_complete(hdev, status);
5412}
5413
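/* RX work: drain hdev->rx_q, passing a copy of each frame to the
 * monitor and (in promiscuous mode) to the HCI sockets, and then
 * dispatching the frame by packet type.
 */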
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

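/* TX command work: if the controller has a free command credit, dequeue
 * the next queued command, keep a clone in hdev->sent_cmd so that the
 * matching completion event can be handled, and send the frame. The
 * command timer is rearmed for every command, except while a reset is
 * in progress, in which case any pending timeout is cancelled.
 */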
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

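/* Append an LE Set Scan Enable command that disables scanning to the
 * given request.
 */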
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

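/* Queue an LE Add Device To White List command for the given
 * connection parameters.
 */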
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

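/* Synchronize the controller's white list with the pending connection
 * and pending report lists. Returns the scan filter policy to use:
 * 0x01 if the white list covers all relevant devices, or 0x00 (accept
 * all advertising) if the white list cannot be used.
 */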
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If there are more pending devices than available white
	 * list entries in the controller, then just abort and
	 * return the filter policy value to not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

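/* Program the controller for passive scanning: pick the own address
 * type, bring the white list up to date, and queue the scan parameter
 * and scan enable commands.
 */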
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

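/* Completion callback for the background scan update request; it only
 * logs a failure status.
 */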
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns and hdev->pend_le_reports lists. If there are
 * pending LE connections or devices to report, we start the background
 * scanning, otherwise we stop it.
 *
 * This function requires the caller to hold hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}

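/* Check whether any entry on the BR/EDR whitelist currently lacks a
 * fully established connection, in which case page scan needs to stay
 * enabled so that the device can connect.
 */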
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

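/* Enable or disable page scan (plus inquiry scan when discoverable)
 * based on the connectable setting and the BR/EDR whitelist. The
 * command is added to the given request if one is provided, otherwise
 * it is sent directly.
 */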
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}