/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
Marcel Holtmannb78752c2010-08-08 23:06:53 -040042static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020043static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020044static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046/* HCI device list */
47LIST_HEAD(hci_dev_list);
48DEFINE_RWLOCK(hci_dev_list_lock);
49
50/* HCI callback list */
51LIST_HEAD(hci_cb_list);
52DEFINE_RWLOCK(hci_cb_list_lock);
53
Sasha Levin3df92b32012-05-27 22:36:56 +020054/* HCI ID Numbering */
55static DEFINE_IDA(hci_index_ida);
56
Marcel Holtmann899de762014-07-11 05:51:58 +020057/* ----- HCI requests ----- */
58
59#define HCI_REQ_DONE 0
60#define HCI_REQ_PEND 1
61#define HCI_REQ_CANCELED 2
62
63#define hci_req_lock(d) mutex_lock(&d->req_lock)
64#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
Marcel Holtmann65164552005-10-28 19:20:48 +020068static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070069{
Marcel Holtmann040030e2012-02-20 14:50:37 +010070 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070071}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
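/* Descriptive note (not in the original source): writing "1" to this
 * debugfs entry enables Device Under Test mode by sending
 * HCI_OP_ENABLE_DUT_MODE as a synchronous command; writing "0" leaves
 * the mode again via HCI_OP_RESET. The controller's status byte in the
 * response decides whether the HCI_DUT_MODE flag is flipped.
 */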
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

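/* Descriptive note (not in the original source): dump every local
 * features page (page 0 up to max_page) as eight hex octets per line,
 * plus the LE feature octets when the controller is LE capable.
 */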
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct link_key *key;

        rcu_read_lock();
        list_for_each_entry_rcu(key, &hdev->link_keys, list)
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        rcu_read_unlock();

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_lesc_support_fops = {
        .open           = simple_open,
        .read           = force_lesc_support_read,
        .write          = force_lesc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

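/* Descriptive note (not in the original source): the idle timeout is
 * in milliseconds; zero disables it, and non-zero values are confined
 * to the 500 ms .. 3600000 ms (one hour) window checked below.
 */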
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

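/* Descriptive note (not in the original source): sniff intervals are
 * expressed in baseband slots of 0.625 ms and must be even; the
 * setters below also preserve the min <= max invariant between the
 * two values.
 */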
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

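/* Descriptive note (not in the original source): LE connection
 * interval values are in units of 1.25 ms. The range enforced below,
 * 0x0006 to 0x0c80, corresponds to 7.5 ms up to 4 seconds, and the
 * minimum may never exceed the maximum.
 */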
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

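/* Descriptive note (not in the original source): the LE link
 * supervision timeout is in units of 10 ms; the allowed range below,
 * 0x000a to 0x0c80, corresponds to 100 ms up to 32 seconds.
 */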
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

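/* Descriptive note (not in the original source): LE advertising
 * intervals are in units of 0.625 ms; the range enforced below,
 * 0x0020 to 0x4000, corresponds to 20 ms up to 10.24 seconds.
 */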
static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

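/* Descriptive note (not in the original source): take ownership of
 * the last received event (hdev->recv_evt) and validate it. If a
 * specific event code was requested, any other event is discarded;
 * otherwise the event must be a Command Complete whose opcode matches
 * the issued command. On failure the skb is freed and
 * ERR_PTR(-ENODATA) is returned.
 */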
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

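/* Descriptive note (not in the original source): send a single HCI
 * command and block, interruptibly and for at most the given timeout
 * in jiffies, until the matching completion event arrives. On success
 * the response skb is returned, otherwise an ERR_PTR(). Callers
 * serialize these synchronous commands with hci_req_lock(); see
 * dut_mode_write() above for an example.
 */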
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
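/* Descriptive note (not in the original source): the builder callback
 * queues commands onto the request. If it queues none, hci_req_run()
 * fails with -ENODATA, which is treated as success below.
 * Serialization against concurrent requests is the caller's job; see
 * hci_req_sync() further down, which takes hci_req_lock().
 */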
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

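/* Descriptive note (not in the original source): first-stage init for
 * primary (BR/EDR) controllers. Select packet-based flow control and
 * read the controller's basic identity: features, version and
 * address.
 */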
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

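/* Descriptive note (not in the original source): first-stage init for
 * AMP controllers, which use block-based flow control and a slightly
 * different set of mandatory information reads.
 */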
static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

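/* Descriptive note (not in the original source): pick the inquiry
 * mode to program: 0x02 for extended inquiry results, 0x01 for
 * inquiry results with RSSI, 0x00 for standard results. The
 * manufacturer/revision matches below apparently cover controllers
 * that handle RSSI inquiry results without advertising the feature
 * bit.
 */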
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001554static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556 struct hci_dev *hdev = req->hdev;
1557
Johan Hedberg2177bab2013-03-05 20:37:43 +02001558 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001560 else
1561 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001562
1563 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001564 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001565
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001566 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1567 * local supported commands HCI command.
1568 */
1569 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571
1572 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001573	 /* When SSP is available, the host features page
 1574	 * should also be available. However some
1575 * controllers list the max_page as 0 as long as SSP
1576 * has not been enabled. To achieve proper debugging
1577 * output, force the minimum max_page to 1 at least.
1578 */
1579 hdev->max_page = 0x01;
1580
Johan Hedberg2177bab2013-03-05 20:37:43 +02001581 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1582 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001583 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1584 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585 } else {
1586 struct hci_cp_write_eir cp;
1587
1588 memset(hdev->eir, 0, sizeof(hdev->eir));
1589 memset(&cp, 0, sizeof(cp));
1590
Johan Hedberg42c6b122013-03-05 20:37:49 +02001591 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001592 }
1593 }
1594
1595 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001596 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597
1598 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001599 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001600
1601 if (lmp_ext_feat_capable(hdev)) {
1602 struct hci_cp_read_local_ext_features cp;
1603
1604 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001605 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1606 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001607 }
1608
1609 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1610 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001611 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1612 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001613 }
1614}
1615
Johan Hedberg42c6b122013-03-05 20:37:49 +02001616static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001617{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001618 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001619 struct hci_cp_write_def_link_policy cp;
1620 u16 link_policy = 0;
1621
1622 if (lmp_rswitch_capable(hdev))
1623 link_policy |= HCI_LP_RSWITCH;
1624 if (lmp_hold_capable(hdev))
1625 link_policy |= HCI_LP_HOLD;
1626 if (lmp_sniff_capable(hdev))
1627 link_policy |= HCI_LP_SNIFF;
1628 if (lmp_park_capable(hdev))
1629 link_policy |= HCI_LP_PARK;
1630
1631 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001632 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633}
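/* Worked example (a sketch assuming the usual HCI_LP_* bit values,
 * 0x0001 RSWITCH / 0x0002 HOLD / 0x0004 SNIFF / 0x0008 PARK): a
 * controller advertising role switch and sniff mode but neither hold
 * nor park mode would compute link_policy = 0x0005 and send it,
 * LE16-encoded, as the default policy for new connections.
 */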
1634
Johan Hedberg42c6b122013-03-05 20:37:49 +02001635static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001636{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001637 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001638 struct hci_cp_write_le_host_supported cp;
1639
Johan Hedbergc73eee92013-04-19 18:35:21 +03001640 /* LE-only devices do not support explicit enablement */
1641 if (!lmp_bredr_capable(hdev))
1642 return;
1643
Johan Hedberg2177bab2013-03-05 20:37:43 +02001644 memset(&cp, 0, sizeof(cp));
1645
1646 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1647 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001648 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001649 }
1650
1651 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001652 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1653 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001654}
1655
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001656static void hci_set_event_mask_page_2(struct hci_request *req)
1657{
1658 struct hci_dev *hdev = req->hdev;
1659 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1660
 1661	 /* If Connectionless Slave Broadcast master role is supported,
1662 * enable all necessary events for it.
1663 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001664 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001665 events[1] |= 0x40; /* Triggered Clock Capture */
1666 events[1] |= 0x80; /* Synchronization Train Complete */
1667 events[2] |= 0x10; /* Slave Page Response Timeout */
1668 events[2] |= 0x20; /* CSB Channel Map Change */
1669 }
1670
 1671	 /* If Connectionless Slave Broadcast slave role is supported,
1672 * enable all necessary events for it.
1673 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001674 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001675 events[2] |= 0x01; /* Synchronization Train Received */
1676 events[2] |= 0x02; /* CSB Receive */
1677 events[2] |= 0x04; /* CSB Timeout */
1678 events[2] |= 0x08; /* Truncated Page Complete */
1679 }
1680
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001681 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001682 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001683 events[2] |= 0x80;
1684
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001685 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1686}
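/* Illustrative note (not part of the original source): a controller
 * supporting both CSB roles plus the authenticated payload timeout
 * would end up with the page-2 mask bytes events[1] = 0xc0 and
 * events[2] = 0xbf, all other bytes staying zero.
 */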
1687
Johan Hedberg42c6b122013-03-05 20:37:49 +02001688static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001689{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001690 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001691 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001692
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001693 hci_setup_event_mask(req);
1694
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001695 /* Some Broadcom based Bluetooth controllers do not support the
1696 * Delete Stored Link Key command. They are clearly indicating its
1697 * absence in the bit mask of supported commands.
1698 *
 1699	 * Check the supported commands and only if the command is marked
 1700	 * as supported, send it. If not supported, assume that the controller
1701 * does not have actual support for stored link keys which makes this
1702 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001703 *
1704 * Some controllers indicate that they support handling deleting
1705 * stored link keys, but they don't. The quirk lets a driver
1706 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001707 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001708 if (hdev->commands[6] & 0x80 &&
1709 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001710 struct hci_cp_delete_stored_link_key cp;
1711
1712 bacpy(&cp.bdaddr, BDADDR_ANY);
1713 cp.delete_all = 0x01;
1714 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715 sizeof(cp), &cp);
1716 }
1717
Johan Hedberg2177bab2013-03-05 20:37:43 +02001718 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001719 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001720
Andre Guedes9193c6e2014-07-01 18:10:09 -03001721 if (lmp_le_capable(hdev)) {
1722 u8 events[8];
1723
1724 memset(events, 0, sizeof(events));
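		/* The 0x0f default below should enable the first four LE
		 * subevents, which per the core specification bit layout are
		 * LE Connection Complete, LE Advertising Report, LE
		 * Connection Update Complete and LE Read Remote Used
		 * Features Complete.
		 */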
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001725 events[0] = 0x0f;
1726
1727 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001729
1730 /* If controller supports the Connection Parameters Request
1731 * Link Layer Procedure, enable the corresponding event.
1732 */
1733 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734 events[0] |= 0x20; /* LE Remote Connection
1735 * Parameter Request
1736 */
1737
Marcel Holtmann4b71bba2014-12-05 16:20:12 +01001738 /* If the controller supports Extended Scanner Filter
 1739	 * Policies, enable the corresponding event.
1740 */
1741 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1742 events[1] |= 0x04; /* LE Direct Advertising
1743 * Report
1744 */
1745
Marcel Holtmann5a34bd52014-12-05 16:20:15 +01001746 /* If the controller supports the LE Read Local P-256
1747 * Public Key command, enable the corresponding event.
1748 */
1749 if (hdev->commands[34] & 0x02)
1750 events[0] |= 0x80; /* LE Read Local P-256
1751 * Public Key Complete
1752 */
1753
1754 /* If the controller supports the LE Generate DHKey
1755 * command, enable the corresponding event.
1756 */
1757 if (hdev->commands[34] & 0x04)
1758 events[1] |= 0x01; /* LE Generate DHKey Complete */
1759
Andre Guedes9193c6e2014-07-01 18:10:09 -03001760 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1761 events);
1762
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001763 if (hdev->commands[25] & 0x40) {
1764 /* Read LE Advertising Channel TX Power */
1765 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1766 }
1767
Johan Hedberg42c6b122013-03-05 20:37:49 +02001768 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001769 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001770
1771 /* Read features beyond page 1 if available */
1772 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1773 struct hci_cp_read_local_ext_features cp;
1774
1775 cp.page = p;
1776 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1777 sizeof(cp), &cp);
1778 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001779}
1780
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001781static void hci_init4_req(struct hci_request *req, unsigned long opt)
1782{
1783 struct hci_dev *hdev = req->hdev;
1784
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001785 /* Set event mask page 2 if the HCI command for it is supported */
1786 if (hdev->commands[22] & 0x04)
1787 hci_set_event_mask_page_2(req);
1788
Marcel Holtmann109e3192014-07-23 19:24:56 +02001789 /* Read local codec list if the HCI command is supported */
1790 if (hdev->commands[29] & 0x20)
1791 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1792
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001793 /* Get MWS transport configuration if the HCI command is supported */
1794 if (hdev->commands[30] & 0x08)
1795 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1796
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001797 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001798 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001799 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001800
1801 /* Enable Secure Connections if supported and configured */
Johan Hedberg710f11c2014-05-26 11:21:22 +03001802 if (bredr_sc_enabled(hdev)) {
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001803 u8 support = 0x01;
1804 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1805 sizeof(support), &support);
1806 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001807}
1808
Johan Hedberg2177bab2013-03-05 20:37:43 +02001809static int __hci_init(struct hci_dev *hdev)
1810{
1811 int err;
1812
1813 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1814 if (err < 0)
1815 return err;
1816
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001817 /* The Device Under Test (DUT) mode is special and available for
1818 * all controller types. So just create it early on.
1819 */
1820 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1821 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1822 &dut_mode_fops);
1823 }
1824
Johan Hedberg2177bab2013-03-05 20:37:43 +02001825	 /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
1826 * BR/EDR/LE type controllers. AMP controllers only need the
1827 * first stage init.
1828 */
1829 if (hdev->dev_type != HCI_BREDR)
1830 return 0;
1831
1832 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1833 if (err < 0)
1834 return err;
1835
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001836 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1837 if (err < 0)
1838 return err;
1839
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001840 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1841 if (err < 0)
1842 return err;
1843
1844 /* Only create debugfs entries during the initial setup
1845 * phase and not every time the controller gets powered on.
1846 */
1847 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1848 return 0;
1849
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001850 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1851 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001852 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1853 &hdev->manufacturer);
1854 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1855 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001856 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1857 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001858 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1859 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001860 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1861
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001862 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1863 &conn_info_min_age_fops);
1864 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1865 &conn_info_max_age_fops);
1866
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001867 if (lmp_bredr_capable(hdev)) {
1868 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1869 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001870 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1871 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001872 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1873 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001874 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1875 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001876 }
1877
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001878 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001879 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1880 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001881 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1882 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001883 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1884 hdev, &sc_only_mode_fops);
Johan Hedberg858cdc72014-10-16 10:45:31 +02001885 if (lmp_le_capable(hdev))
1886 debugfs_create_file("force_lesc_support", 0644,
1887 hdev->debugfs, hdev,
1888 &force_lesc_support_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001889 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001890
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001891 if (lmp_sniff_capable(hdev)) {
1892 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1893 hdev, &idle_timeout_fops);
1894 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1895 hdev, &sniff_min_interval_fops);
1896 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1897 hdev, &sniff_max_interval_fops);
1898 }
1899
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001900 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001901 debugfs_create_file("identity", 0400, hdev->debugfs,
1902 hdev, &identity_fops);
1903 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1904 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001905 debugfs_create_file("random_address", 0444, hdev->debugfs,
1906 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001907 debugfs_create_file("static_address", 0444, hdev->debugfs,
1908 hdev, &static_address_fops);
1909
1910 /* For controllers with a public address, provide a debug
1911 * option to force the usage of the configured static
1912 * address. By default the public address is used.
1913 */
1914 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1915 debugfs_create_file("force_static_address", 0644,
1916 hdev->debugfs, hdev,
1917 &force_static_address_fops);
1918
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001919 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1920 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001921 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1922 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001923 debugfs_create_file("identity_resolving_keys", 0400,
1924 hdev->debugfs, hdev,
1925 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001926 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1927 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001928 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1929 hdev, &conn_min_interval_fops);
1930 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1931 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001932 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1933 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001934 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1935 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001936 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1937 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001938 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1939 hdev, &adv_min_interval_fops);
1940 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1941 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001942 debugfs_create_u16("discov_interleaved_timeout", 0644,
1943 hdev->debugfs,
1944 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001945
Johan Hedberg711eafe2014-08-08 09:32:52 +03001946 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001947 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001948
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001949 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001950}
1951
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001952static void hci_init0_req(struct hci_request *req, unsigned long opt)
1953{
1954 struct hci_dev *hdev = req->hdev;
1955
1956 BT_DBG("%s %ld", hdev->name, opt);
1957
1958 /* Reset */
1959 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1960 hci_reset_req(req, 0);
1961
1962 /* Read Local Version */
1963 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1964
1965 /* Read BD Address */
1966 if (hdev->set_bdaddr)
1967 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1968}
1969
1970static int __hci_unconf_init(struct hci_dev *hdev)
1971{
1972 int err;
1973
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001974 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1975 return 0;
1976
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001977 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1978 if (err < 0)
1979 return err;
1980
1981 return 0;
1982}
1983
Johan Hedberg42c6b122013-03-05 20:37:49 +02001984static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985{
1986 __u8 scan = opt;
1987
Johan Hedberg42c6b122013-03-05 20:37:49 +02001988 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
1990 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001991 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992}
1993
Johan Hedberg42c6b122013-03-05 20:37:49 +02001994static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995{
1996 __u8 auth = opt;
1997
Johan Hedberg42c6b122013-03-05 20:37:49 +02001998 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002001 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002}
2003
Johan Hedberg42c6b122013-03-05 20:37:49 +02002004static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
2006 __u8 encrypt = opt;
2007
Johan Hedberg42c6b122013-03-05 20:37:49 +02002008 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002010 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002011 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012}
2013
Johan Hedberg42c6b122013-03-05 20:37:49 +02002014static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002015{
2016 __le16 policy = cpu_to_le16(opt);
2017
Johan Hedberg42c6b122013-03-05 20:37:49 +02002018 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002019
2020 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002021 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002022}
2023
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002024/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 * Device is held on return. */
2026struct hci_dev *hci_dev_get(int index)
2027{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002028 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
2030 BT_DBG("%d", index);
2031
2032 if (index < 0)
2033 return NULL;
2034
2035 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002036 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 if (d->id == index) {
2038 hdev = hci_dev_hold(d);
2039 break;
2040 }
2041 }
2042 read_unlock(&hci_dev_list_lock);
2043 return hdev;
2044}
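/* Usage sketch (hypothetical caller, mirroring the pattern of the
 * ioctl helpers below): every successful hci_dev_get() must be
 * balanced by hci_dev_put() once the device is no longer needed.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */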
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
2046/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002047
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002048bool hci_discovery_active(struct hci_dev *hdev)
2049{
2050 struct discovery_state *discov = &hdev->discovery;
2051
Andre Guedes6fbe1952012-02-03 17:47:58 -03002052 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002053 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002054 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002055 return true;
2056
Andre Guedes6fbe1952012-02-03 17:47:58 -03002057 default:
2058 return false;
2059 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002060}
2061
Johan Hedbergff9ef572012-01-04 14:23:45 +02002062void hci_discovery_set_state(struct hci_dev *hdev, int state)
2063{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002064 int old_state = hdev->discovery.state;
2065
Johan Hedbergff9ef572012-01-04 14:23:45 +02002066 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2067
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002068 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002069 return;
2070
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002071 hdev->discovery.state = state;
2072
Johan Hedbergff9ef572012-01-04 14:23:45 +02002073 switch (state) {
2074 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002075 hci_update_background_scan(hdev);
2076
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002077 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002078 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002079 break;
2080 case DISCOVERY_STARTING:
2081 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002082 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002083 mgmt_discovering(hdev, 1);
2084 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002085 case DISCOVERY_RESOLVING:
2086 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002087 case DISCOVERY_STOPPING:
2088 break;
2089 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002090}
2091
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002092void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093{
Johan Hedberg30883512012-01-04 14:16:21 +02002094 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002095 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
Johan Hedberg561aafb2012-01-04 13:31:59 +02002097 list_for_each_entry_safe(p, n, &cache->all, all) {
2098 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002099 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002101
2102 INIT_LIST_HEAD(&cache->unknown);
2103 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104}
2105
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002106struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2107 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
Johan Hedberg30883512012-01-04 14:16:21 +02002109 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 struct inquiry_entry *e;
2111
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002112 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Johan Hedberg561aafb2012-01-04 13:31:59 +02002114 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002116 return e;
2117 }
2118
2119 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120}
2121
Johan Hedberg561aafb2012-01-04 13:31:59 +02002122struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002123 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002124{
Johan Hedberg30883512012-01-04 14:16:21 +02002125 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002126 struct inquiry_entry *e;
2127
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002128 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002129
2130 list_for_each_entry(e, &cache->unknown, list) {
2131 if (!bacmp(&e->data.bdaddr, bdaddr))
2132 return e;
2133 }
2134
2135 return NULL;
2136}
2137
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002138struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002139 bdaddr_t *bdaddr,
2140 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002141{
2142 struct discovery_state *cache = &hdev->discovery;
2143 struct inquiry_entry *e;
2144
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002145 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002146
2147 list_for_each_entry(e, &cache->resolve, list) {
2148 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2149 return e;
2150 if (!bacmp(&e->data.bdaddr, bdaddr))
2151 return e;
2152 }
2153
2154 return NULL;
2155}
2156
Johan Hedberga3d4e202012-01-09 00:53:02 +02002157void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002158 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002159{
2160 struct discovery_state *cache = &hdev->discovery;
2161 struct list_head *pos = &cache->resolve;
2162 struct inquiry_entry *p;
2163
2164 list_del(&ie->list);
2165
2166 list_for_each_entry(p, &cache->resolve, list) {
2167 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002168 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002169 break;
2170 pos = &p->list;
2171 }
2172
2173 list_add(&ie->list, pos);
2174}
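/* Reading the loop above: entries with a pending name request keep
 * their position, and the re-inserted entry lands in front of the
 * first non-pending entry with an equal or weaker (larger absolute)
 * RSSI, so the resolve list appears to stay ordered with the
 * strongest signals first.
 */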
2175
Marcel Holtmannaf589252014-07-01 14:11:20 +02002176u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2177 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178{
Johan Hedberg30883512012-01-04 14:16:21 +02002179 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002180 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002181 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002183 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
Johan Hedberg6928a922014-10-26 20:46:09 +01002185 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01002186
Marcel Holtmannaf589252014-07-01 14:11:20 +02002187 if (!data->ssp_mode)
2188 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002189
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002190 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002191 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002192 if (!ie->data.ssp_mode)
2193 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002194
Johan Hedberga3d4e202012-01-09 00:53:02 +02002195 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002196 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002197 ie->data.rssi = data->rssi;
2198 hci_inquiry_cache_update_resolve(hdev, ie);
2199 }
2200
Johan Hedberg561aafb2012-01-04 13:31:59 +02002201 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002202 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002203
Johan Hedberg561aafb2012-01-04 13:31:59 +02002204 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002205 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002206 if (!ie) {
2207 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2208 goto done;
2209 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002210
2211 list_add(&ie->all, &cache->all);
2212
2213 if (name_known) {
2214 ie->name_state = NAME_KNOWN;
2215 } else {
2216 ie->name_state = NAME_NOT_KNOWN;
2217 list_add(&ie->list, &cache->unknown);
2218 }
2219
2220update:
2221 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002222 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002223 ie->name_state = NAME_KNOWN;
2224 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 }
2226
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002227 memcpy(&ie->data, data, sizeof(*data));
2228 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002230
2231 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002232 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002233
Marcel Holtmannaf589252014-07-01 14:11:20 +02002234done:
2235 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236}
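/* Net effect (summary, not part of the original source): the returned
 * MGMT flags tell the caller whether userspace still has to confirm
 * the remote name (entry is new, unnamed or could not be allocated)
 * and whether legacy pairing applies (no SSP support seen from the
 * remote device).
 */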
2237
2238static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2239{
Johan Hedberg30883512012-01-04 14:16:21 +02002240 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 struct inquiry_info *info = (struct inquiry_info *) buf;
2242 struct inquiry_entry *e;
2243 int copied = 0;
2244
Johan Hedberg561aafb2012-01-04 13:31:59 +02002245 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002247
2248 if (copied >= num)
2249 break;
2250
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 bacpy(&info->bdaddr, &data->bdaddr);
2252 info->pscan_rep_mode = data->pscan_rep_mode;
2253 info->pscan_period_mode = data->pscan_period_mode;
2254 info->pscan_mode = data->pscan_mode;
2255 memcpy(info->dev_class, data->dev_class, 3);
2256 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002257
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002259 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 }
2261
2262 BT_DBG("cache %p, copied %d", cache, copied);
2263 return copied;
2264}
2265
Johan Hedberg42c6b122013-03-05 20:37:49 +02002266static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267{
2268 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002269 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 struct hci_cp_inquiry cp;
2271
2272 BT_DBG("%s", hdev->name);
2273
2274 if (test_bit(HCI_INQUIRY, &hdev->flags))
2275 return;
2276
2277 /* Start Inquiry */
2278 memcpy(&cp.lap, &ir->lap, 3);
2279 cp.length = ir->length;
2280 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002281 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282}
2283
2284int hci_inquiry(void __user *arg)
2285{
2286 __u8 __user *ptr = arg;
2287 struct hci_inquiry_req ir;
2288 struct hci_dev *hdev;
2289 int err = 0, do_inquiry = 0, max_rsp;
2290 long timeo;
2291 __u8 *buf;
2292
2293 if (copy_from_user(&ir, ptr, sizeof(ir)))
2294 return -EFAULT;
2295
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002296 hdev = hci_dev_get(ir.dev_id);
2297 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 return -ENODEV;
2299
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002300 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2301 err = -EBUSY;
2302 goto done;
2303 }
2304
Marcel Holtmann4a964402014-07-02 19:10:33 +02002305 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002306 err = -EOPNOTSUPP;
2307 goto done;
2308 }
2309
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002310 if (hdev->dev_type != HCI_BREDR) {
2311 err = -EOPNOTSUPP;
2312 goto done;
2313 }
2314
Johan Hedberg56f87902013-10-02 13:43:13 +03002315 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2316 err = -EOPNOTSUPP;
2317 goto done;
2318 }
2319
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002320 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002321 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002322 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002323 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 do_inquiry = 1;
2325 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002326 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
Marcel Holtmann04837f62006-07-03 10:02:33 +02002328 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002329
2330 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002331 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2332 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002333 if (err < 0)
2334 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002335
2336 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2337 * cleared). If it is interrupted by a signal, return -EINTR.
2338 */
NeilBrown74316202014-07-07 15:16:04 +10002339 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002340 TASK_INTERRUPTIBLE))
2341 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002342 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002344	 /* For an unlimited number of responses we will use a buffer with
2345 * 255 entries
2346 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2348
 2349	 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
 2350	 * copy it to user space.
2351 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002352 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002353 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 err = -ENOMEM;
2355 goto done;
2356 }
2357
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002358 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002360 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
2362 BT_DBG("num_rsp %d", ir.num_rsp);
2363
2364 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2365 ptr += sizeof(ir);
2366 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002367 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002369 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 err = -EFAULT;
2371
2372 kfree(buf);
2373
2374done:
2375 hci_dev_put(hdev);
2376 return err;
2377}
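/* Userspace sketch (assumes the hci_inquiry_req and inquiry_info
 * layouts from the exported UAPI headers; the socket fd and buffer
 * size are illustrative). The LAP below is the General Inquiry Access
 * Code in little-endian order and length is in units of 1.28 seconds:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(hci_sock, HCIINQUIRY, &buf);
 */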
2378
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002379static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 int ret = 0;
2382
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 BT_DBG("%s %p", hdev->name, hdev);
2384
2385 hci_req_lock(hdev);
2386
Johan Hovold94324962012-03-15 14:48:41 +01002387 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2388 ret = -ENODEV;
2389 goto done;
2390 }
2391
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002392 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2393 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002394 /* Check for rfkill but allow the HCI setup stage to
2395 * proceed (which in itself doesn't cause any RF activity).
2396 */
2397 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2398 ret = -ERFKILL;
2399 goto done;
2400 }
2401
 2402	 /* Check for a valid public address or a configured static
 2403	 * random address, but let the HCI setup proceed to
2404 * be able to determine if there is a public address
2405 * or not.
2406 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002407 * In case of user channel usage, it is not important
2408 * if a public address or static random address is
2409 * available.
2410 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002411 * This check is only valid for BR/EDR controllers
2412 * since AMP controllers do not have an address.
2413 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002414 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2415 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002416 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2417 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2418 ret = -EADDRNOTAVAIL;
2419 goto done;
2420 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002421 }
2422
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 if (test_bit(HCI_UP, &hdev->flags)) {
2424 ret = -EALREADY;
2425 goto done;
2426 }
2427
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 if (hdev->open(hdev)) {
2429 ret = -EIO;
2430 goto done;
2431 }
2432
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002433 atomic_set(&hdev->cmd_cnt, 1);
2434 set_bit(HCI_INIT, &hdev->flags);
2435
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002436 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2437 if (hdev->setup)
2438 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002439
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002440 /* The transport driver can set these quirks before
2441 * creating the HCI device or in its setup callback.
2442 *
2443 * In case any of them is set, the controller has to
2444 * start up as unconfigured.
2445 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002446 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2447 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002448 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002449
2450 /* For an unconfigured controller it is required to
2451 * read at least the version information provided by
2452 * the Read Local Version Information command.
2453 *
2454 * If the set_bdaddr driver callback is provided, then
2455 * also the original Bluetooth public device address
2456 * will be read using the Read BD Address command.
2457 */
2458 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2459 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002460 }
2461
Marcel Holtmann9713c172014-07-06 12:11:15 +02002462 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2463 /* If public address change is configured, ensure that
2464 * the address gets programmed. If the driver does not
2465 * support changing the public address, fail the power
2466 * on procedure.
2467 */
2468 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2469 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002470 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2471 else
2472 ret = -EADDRNOTAVAIL;
2473 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002474
2475 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002476 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002478 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 }
2480
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002481 clear_bit(HCI_INIT, &hdev->flags);
2482
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 if (!ret) {
2484 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002485 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 set_bit(HCI_UP, &hdev->flags);
2487 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002488 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002489 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002490 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002491 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002492 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002493 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002494 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002495 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002496 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002497 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002499 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002500 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002501 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
2503 skb_queue_purge(&hdev->cmd_q);
2504 skb_queue_purge(&hdev->rx_q);
2505
2506 if (hdev->flush)
2507 hdev->flush(hdev);
2508
2509 if (hdev->sent_cmd) {
2510 kfree_skb(hdev->sent_cmd);
2511 hdev->sent_cmd = NULL;
2512 }
2513
2514 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002515 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 }
2517
2518done:
2519 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 return ret;
2521}
2522
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002523/* ---- HCI ioctl helpers ---- */
2524
2525int hci_dev_open(__u16 dev)
2526{
2527 struct hci_dev *hdev;
2528 int err;
2529
2530 hdev = hci_dev_get(dev);
2531 if (!hdev)
2532 return -ENODEV;
2533
Marcel Holtmann4a964402014-07-02 19:10:33 +02002534 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002535 * up as user channel. Trying to bring them up as normal devices
2536 * will result into a failure. Only user channel operation is
2537 * possible.
2538 *
2539 * When this function is called for a user channel, the flag
2540 * HCI_USER_CHANNEL will be set first before attempting to
2541 * open the device.
2542 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002543 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002544 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545 err = -EOPNOTSUPP;
2546 goto done;
2547 }
2548
Johan Hedberge1d08f42013-10-01 22:44:50 +03002549 /* We need to ensure that no other power on/off work is pending
2550 * before proceeding to call hci_dev_do_open. This is
2551 * particularly important if the setup procedure has not yet
2552 * completed.
2553 */
2554 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2555 cancel_delayed_work(&hdev->power_off);
2556
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002557 /* After this call it is guaranteed that the setup procedure
2558 * has finished. This means that error conditions like RFKILL
2559 * or no valid public or static random address apply.
2560 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002561 flush_workqueue(hdev->req_workqueue);
2562
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002563	 /* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002564	 * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002565 * so that pairing works for them. Once the management interface
2566 * is in use this bit will be cleared again and userspace has
2567 * to explicitly enable it.
2568 */
2569 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2570 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002571 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002572
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002573 err = hci_dev_do_open(hdev);
2574
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002575done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002576 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002577 return err;
2578}
2579
Johan Hedbergd7347f32014-07-04 12:37:23 +03002580/* This function requires the caller holds hdev->lock */
2581static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2582{
2583 struct hci_conn_params *p;
2584
Johan Hedbergf161dd42014-08-15 21:06:54 +03002585 list_for_each_entry(p, &hdev->le_conn_params, list) {
2586 if (p->conn) {
2587 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002588 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002589 p->conn = NULL;
2590 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002591 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002592 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002593
2594 BT_DBG("All LE pending actions cleared");
2595}
2596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597static int hci_dev_do_close(struct hci_dev *hdev)
2598{
2599 BT_DBG("%s %p", hdev->name, hdev);
2600
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002601 cancel_delayed_work(&hdev->power_off);
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 hci_req_cancel(hdev, ENODEV);
2604 hci_req_lock(hdev);
2605
2606 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002607 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 hci_req_unlock(hdev);
2609 return 0;
2610 }
2611
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002612 /* Flush RX and TX works */
2613 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002614 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002616 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002617 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002618 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002619 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002620 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002621 }
2622
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002623 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002624 cancel_delayed_work(&hdev->service_cache);
2625
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002626 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002627
2628 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2629 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002630
Johan Hedberg76727c02014-11-18 09:00:14 +02002631 /* Avoid potential lockdep warnings from the *_flush() calls by
2632 * ensuring the workqueue is empty up front.
2633 */
2634 drain_workqueue(hdev->workqueue);
2635
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002636 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002637 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002638 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002639 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002640 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641
2642 hci_notify(hdev, HCI_DEV_DOWN);
2643
2644 if (hdev->flush)
2645 hdev->flush(hdev);
2646
2647 /* Reset device */
2648 skb_queue_purge(&hdev->cmd_q);
2649 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002650 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2651 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002652 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002654 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 clear_bit(HCI_INIT, &hdev->flags);
2656 }
2657
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002658 /* flush cmd work */
2659 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660
2661 /* Drop queues */
2662 skb_queue_purge(&hdev->rx_q);
2663 skb_queue_purge(&hdev->cmd_q);
2664 skb_queue_purge(&hdev->raw_q);
2665
2666 /* Drop last sent command */
2667 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002668 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 kfree_skb(hdev->sent_cmd);
2670 hdev->sent_cmd = NULL;
2671 }
2672
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002673 kfree_skb(hdev->recv_evt);
2674 hdev->recv_evt = NULL;
2675
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676	 /* After this point our queues are empty and no tasks are scheduled. */
2678 hdev->close(hdev);
2679
Johan Hedberg35b973c2013-03-15 17:06:59 -05002680 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002681 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002682 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2683
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002684 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2685 if (hdev->dev_type == HCI_BREDR) {
2686 hci_dev_lock(hdev);
2687 mgmt_powered(hdev, 0);
2688 hci_dev_unlock(hdev);
2689 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002690 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002691
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002692 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002693 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002694
Johan Hedberge59fda82012-02-22 18:11:53 +02002695 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002696 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002697 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002698
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 hci_req_unlock(hdev);
2700
2701 hci_dev_put(hdev);
2702 return 0;
2703}
2704
2705int hci_dev_close(__u16 dev)
2706{
2707 struct hci_dev *hdev;
2708 int err;
2709
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002710 hdev = hci_dev_get(dev);
2711 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002713
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002714 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2715 err = -EBUSY;
2716 goto done;
2717 }
2718
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002719 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2720 cancel_delayed_work(&hdev->power_off);
2721
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002723
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002724done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 hci_dev_put(hdev);
2726 return err;
2727}
2728
2729int hci_dev_reset(__u16 dev)
2730{
2731 struct hci_dev *hdev;
2732 int ret = 0;
2733
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002734 hdev = hci_dev_get(dev);
2735 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 return -ENODEV;
2737
2738 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739
Marcel Holtmann808a0492013-08-26 20:57:58 -07002740 if (!test_bit(HCI_UP, &hdev->flags)) {
2741 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002745 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2746 ret = -EBUSY;
2747 goto done;
2748 }
2749
Marcel Holtmann4a964402014-07-02 19:10:33 +02002750 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002751 ret = -EOPNOTSUPP;
2752 goto done;
2753 }
2754
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 /* Drop queues */
2756 skb_queue_purge(&hdev->rx_q);
2757 skb_queue_purge(&hdev->cmd_q);
2758
Johan Hedberg76727c02014-11-18 09:00:14 +02002759 /* Avoid potential lockdep warnings from the *_flush() calls by
2760 * ensuring the workqueue is empty up front.
2761 */
2762 drain_workqueue(hdev->workqueue);
2763
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002764 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002765 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002767 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
2769 if (hdev->flush)
2770 hdev->flush(hdev);
2771
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002772 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002773 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002775 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
2777done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 hci_req_unlock(hdev);
2779 hci_dev_put(hdev);
2780 return ret;
2781}
2782
2783int hci_dev_reset_stat(__u16 dev)
2784{
2785 struct hci_dev *hdev;
2786 int ret = 0;
2787
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002788 hdev = hci_dev_get(dev);
2789 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 return -ENODEV;
2791
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002792 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2793 ret = -EBUSY;
2794 goto done;
2795 }
2796
Marcel Holtmann4a964402014-07-02 19:10:33 +02002797 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002798 ret = -EOPNOTSUPP;
2799 goto done;
2800 }
2801
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2803
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002804done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 return ret;
2807}
2808
Johan Hedberg123abc02014-07-10 12:09:07 +03002809static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2810{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002811 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002812
2813 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2814
2815 if ((scan & SCAN_PAGE))
2816 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2817 &hdev->dev_flags);
2818 else
2819 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2820 &hdev->dev_flags);
2821
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002822 if ((scan & SCAN_INQUIRY)) {
2823 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2824 &hdev->dev_flags);
2825 } else {
2826 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2827 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2828 &hdev->dev_flags);
2829 }
2830
Johan Hedberg123abc02014-07-10 12:09:07 +03002831 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2832 return;
2833
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002834 if (conn_changed || discov_changed) {
2835 /* In case this was disabled through mgmt */
2836 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2837
2838 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2839 mgmt_update_adv_data(hdev);
2840
Johan Hedberg123abc02014-07-10 12:09:07 +03002841 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002842 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002843}
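/* For reference, the mapping implemented above between the HCI scan
 * enable value and the mgmt-visible flags (derived from this code, not
 * quoted from the specification):
 *
 *   scan value                HCI_CONNECTABLE  HCI_DISCOVERABLE
 *   SCAN_DISABLED             cleared          cleared
 *   SCAN_PAGE                 set              cleared
 *   SCAN_INQUIRY              cleared          set
 *   SCAN_PAGE | SCAN_INQUIRY  set              set
 *
 * HCI_LIMITED_DISCOVERABLE is additionally cleared whenever inquiry
 * scan is disabled.
 */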
2844
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845int hci_dev_cmd(unsigned int cmd, void __user *arg)
2846{
2847 struct hci_dev *hdev;
2848 struct hci_dev_req dr;
2849 int err = 0;
2850
2851 if (copy_from_user(&dr, arg, sizeof(dr)))
2852 return -EFAULT;
2853
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002854 hdev = hci_dev_get(dr.dev_id);
2855 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 return -ENODEV;
2857
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002858 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2859 err = -EBUSY;
2860 goto done;
2861 }
2862
Marcel Holtmann4a964402014-07-02 19:10:33 +02002863 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002864 err = -EOPNOTSUPP;
2865 goto done;
2866 }
2867
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002868 if (hdev->dev_type != HCI_BREDR) {
2869 err = -EOPNOTSUPP;
2870 goto done;
2871 }
2872
Johan Hedberg56f87902013-10-02 13:43:13 +03002873 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2874 err = -EOPNOTSUPP;
2875 goto done;
2876 }
2877
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 switch (cmd) {
2879 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002880 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2881 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 break;
2883
2884 case HCISETENCRYPT:
2885 if (!lmp_encrypt_capable(hdev)) {
2886 err = -EOPNOTSUPP;
2887 break;
2888 }
2889
2890 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2891 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002892 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2893 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 if (err)
2895 break;
2896 }
2897
Johan Hedberg01178cd2013-03-05 20:37:41 +02002898 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2899 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 break;
2901
2902 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002903 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2904 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002905
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002906 /* Ensure that the connectable and discoverable states
2907		 * are updated correctly, since this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002908 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002909 if (!err)
2910 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 break;
2912
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002913 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002914 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2915 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002916 break;
2917
2918 case HCISETLINKMODE:
2919 hdev->link_mode = ((__u16) dr.dev_opt) &
2920 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2921 break;
2922
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 case HCISETPTYPE:
2924 hdev->pkt_type = (__u16) dr.dev_opt;
2925 break;
2926
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002928 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2929 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 break;
2931
2932 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002933 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2934 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 break;
2936
2937 default:
2938 err = -EINVAL;
2939 break;
2940 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002941
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002942done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 hci_dev_put(hdev);
2944 return err;
2945}
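/* A hedged user-space sketch (not part of this file) of driving the
 * HCISETSCAN case above; dev_opt carries the scan enable bits. Assumes
 * the BlueZ headers and that dd is a raw HCI socket as in the earlier
 * example.
 */
#if 0
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int set_scan_enable(int dd, int dev_id)
{
	struct hci_dev_req dr = {
		.dev_id  = dev_id,
		/* connectable and discoverable */
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
	};

	/* Dispatched to the HCISETSCAN case in hci_dev_cmd() above */
	return ioctl(dd, HCISETSCAN, (void *) &dr);
}
#endif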
2946
2947int hci_get_dev_list(void __user *arg)
2948{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002949 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 struct hci_dev_list_req *dl;
2951 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 int n = 0, size, err;
2953 __u16 dev_num;
2954
2955 if (get_user(dev_num, (__u16 __user *) arg))
2956 return -EFAULT;
2957
2958 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2959 return -EINVAL;
2960
2961 size = sizeof(*dl) + dev_num * sizeof(*dr);
2962
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002963 dl = kzalloc(size, GFP_KERNEL);
2964 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 return -ENOMEM;
2966
2967 dr = dl->dev_req;
2968
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002969 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002970 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002971 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002972
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002973		/* When the auto-off flag is set the transport is still
2974		 * running, but the device should nevertheless be reported
2975		 * as down.
2976 */
2977 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2978 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002979
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002981 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002982
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 if (++n >= dev_num)
2984 break;
2985 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002986 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987
2988 dl->dev_num = n;
2989 size = sizeof(*dl) + n * sizeof(*dr);
2990
2991 err = copy_to_user(arg, dl, size);
2992 kfree(dl);
2993
2994 return err ? -EFAULT : 0;
2995}
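/* A user-space counterpart sketch (illustrative only, assumes the BlueZ
 * headers): HCIGETDEVLIST fills the same hci_dev_list_req layout that
 * hci_get_dev_list() builds above.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static void print_dev_list(int dd)
{
	struct hci_dev_list_req *dl;
	int i;

	dl = calloc(1, sizeof(*dl) +
		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return;

	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(dd, HCIGETDEVLIST, (void *) dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u: flags 0x%x\n",
			       dl->dev_req[i].dev_id,
			       dl->dev_req[i].dev_opt);

	free(dl);
}
#endif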
2996
2997int hci_get_dev_info(void __user *arg)
2998{
2999 struct hci_dev *hdev;
3000 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003001 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 int err = 0;
3003
3004 if (copy_from_user(&di, arg, sizeof(di)))
3005 return -EFAULT;
3006
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003007 hdev = hci_dev_get(di.dev_id);
3008 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 return -ENODEV;
3010
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003011	/* When the auto-off flag is set the transport is still
3012	 * running, but the device should nevertheless be reported
3013	 * as down.
3014 */
3015 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3016 flags = hdev->flags & ~BIT(HCI_UP);
3017 else
3018 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02003019
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 strcpy(di.name, hdev->name);
3021 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07003022 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003023 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03003025 if (lmp_bredr_capable(hdev)) {
3026 di.acl_mtu = hdev->acl_mtu;
3027 di.acl_pkts = hdev->acl_pkts;
3028 di.sco_mtu = hdev->sco_mtu;
3029 di.sco_pkts = hdev->sco_pkts;
3030 } else {
3031 di.acl_mtu = hdev->le_mtu;
3032 di.acl_pkts = hdev->le_pkts;
3033 di.sco_mtu = 0;
3034 di.sco_pkts = 0;
3035 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 di.link_policy = hdev->link_policy;
3037 di.link_mode = hdev->link_mode;
3038
3039 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3040 memcpy(&di.features, &hdev->features, sizeof(di.features));
3041
3042 if (copy_to_user(arg, &di, sizeof(di)))
3043 err = -EFAULT;
3044
3045 hci_dev_put(hdev);
3046
3047 return err;
3048}
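/* And the matching HCIGETDEVINFO sketch (illustrative only): the
 * di.type field packs the bus in the low nibble and dev_type in the
 * next two bits, exactly as encoded above.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static void print_dev_info(int dd, int dev_id)
{
	struct hci_dev_info di;

	memset(&di, 0, sizeof(di));
	di.dev_id = dev_id;

	if (ioctl(dd, HCIGETDEVINFO, (void *) &di) == 0)
		printf("%s: bus %u dev_type %u\n", di.name,
		       di.type & 0x0f, (di.type & 0x30) >> 4);
}
#endif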
3049
3050/* ---- Interface to HCI drivers ---- */
3051
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003052static int hci_rfkill_set_block(void *data, bool blocked)
3053{
3054 struct hci_dev *hdev = data;
3055
3056 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3057
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003058 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3059 return -EBUSY;
3060
Johan Hedberg5e130362013-09-13 08:58:17 +03003061 if (blocked) {
3062 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003063 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3064 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003065 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003066 } else {
3067 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003068 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003069
3070 return 0;
3071}
3072
3073static const struct rfkill_ops hci_rfkill_ops = {
3074 .set_block = hci_rfkill_set_block,
3075};
3076
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003077static void hci_power_on(struct work_struct *work)
3078{
3079 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003080 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003081
3082 BT_DBG("%s", hdev->name);
3083
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003084 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003085 if (err < 0) {
3086 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003087 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003088 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003089
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003090 /* During the HCI setup phase, a few error conditions are
3091 * ignored and they need to be checked now. If they are still
3092 * valid, it is important to turn the device back off.
3093 */
3094 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003095 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003096 (hdev->dev_type == HCI_BREDR &&
3097 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3098 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003099 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3100 hci_dev_do_close(hdev);
3101 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003102 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3103 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003104 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003105
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003106 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003107 /* For unconfigured devices, set the HCI_RAW flag
3108 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003109 */
3110 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3111 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003112
3113 /* For fully configured devices, this will send
3114 * the Index Added event. For unconfigured devices,
3115	 * it will send the Unconfigured Index Added event.
3116 *
3117 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3118	 * and no event will be sent.
3119 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003120 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003121 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003122		/* Once the controller is configured, it is
3123		 * important to clear the HCI_RAW flag.
3124 */
3125 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3126 clear_bit(HCI_RAW, &hdev->flags);
3127
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003128 /* Powering on the controller with HCI_CONFIG set only
3129 * happens with the transition from unconfigured to
3130 * configured. This will send the Index Added event.
3131 */
3132 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003133 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003134}
3135
3136static void hci_power_off(struct work_struct *work)
3137{
Johan Hedberg32435532011-11-07 22:16:04 +02003138 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003139 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003140
3141 BT_DBG("%s", hdev->name);
3142
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003143 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003144}
3145
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003146static void hci_discov_off(struct work_struct *work)
3147{
3148 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003149
3150 hdev = container_of(work, struct hci_dev, discov_off.work);
3151
3152 BT_DBG("%s", hdev->name);
3153
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003154 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003155}
3156
Johan Hedberg35f74982014-02-18 17:14:32 +02003157void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003158{
Johan Hedberg48210022013-01-27 00:31:28 +02003159 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003160
Johan Hedberg48210022013-01-27 00:31:28 +02003161 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3162 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003163 kfree(uuid);
3164 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003165}
3166
Johan Hedberg35f74982014-02-18 17:14:32 +02003167void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003168{
Johan Hedberg0378b592014-11-19 15:22:22 +02003169 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003170
Johan Hedberg0378b592014-11-19 15:22:22 +02003171 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3172 list_del_rcu(&key->list);
3173 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003174 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003175}
3176
Johan Hedberg35f74982014-02-18 17:14:32 +02003177void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003178{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003179 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003180
Johan Hedberg970d0f12014-11-13 14:37:47 +02003181 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3182 list_del_rcu(&k->list);
3183 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003184 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003185}
3186
Johan Hedberg970c4e42014-02-18 10:19:33 +02003187void hci_smp_irks_clear(struct hci_dev *hdev)
3188{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003189 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003190
Johan Hedbergadae20c2014-11-13 14:37:48 +02003191 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3192 list_del_rcu(&k->list);
3193 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003194 }
3195}
3196
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003197struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3198{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003199 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003200
Johan Hedberg0378b592014-11-19 15:22:22 +02003201 rcu_read_lock();
3202 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3203 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3204 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003205 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003206 }
3207 }
3208 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003209
3210 return NULL;
3211}
3212
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303213static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003214 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003215{
3216 /* Legacy key */
3217 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303218 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003219
3220 /* Debug keys are insecure so don't store them persistently */
3221 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303222 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003223
3224 /* Changed combination key and there's no previous one */
3225 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303226 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003227
3228 /* Security mode 3 case */
3229 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303230 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003231
Johan Hedberge3befab2014-06-01 16:33:39 +03003232 /* BR/EDR key derived using SC from an LE link */
3233 if (conn->type == LE_LINK)
3234 return true;
3235
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003236	/* Neither the local nor the remote side set no-bonding as a requirement */
3237 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303238 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003239
3240 /* Local side had dedicated bonding as requirement */
3241 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303242 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003243
3244 /* Remote side had dedicated bonding as requirement */
3245 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303246 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003247
3248 /* If none of the above criteria match, then don't store the key
3249 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303250 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003251}
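/* Worked example of the rules above (a hedged reading of this code, not
 * of the specification): a legacy combination key (type < 0x03) is
 * always stored; a debug key never is; an SSP key from a link where
 * both sides asked for no-bonding (auth_type and remote_auth 0x00 or
 * 0x01) and that was not dedicated bonding is dropped once the
 * connection is gone.
 */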
3252
Johan Hedberge804d252014-07-16 11:42:28 +03003253static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003254{
Johan Hedberge804d252014-07-16 11:42:28 +03003255 if (type == SMP_LTK)
3256 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003257
Johan Hedberge804d252014-07-16 11:42:28 +03003258 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003259}
3260
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003261struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003263{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003264 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003265
Johan Hedberg970d0f12014-11-13 14:37:47 +02003266 rcu_read_lock();
3267 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003268 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3269 continue;
3270
Johan Hedberg923e2412014-12-03 12:43:39 +02003271 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003272 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003273 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003274 }
3275 }
3276 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003277
3278 return NULL;
3279}
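/* Illustrative kernel-side usage (hypothetical helper, not in the
 * original file). Real callers hold hdev->lock, which is what keeps the
 * returned entry valid after the internal rcu_read_unlock().
 */
#if 0
static bool example_peer_has_master_ltk(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 addr_type)
{
	/* hci_find_ltk() handles rcu_read_lock() internally */
	return hci_find_ltk(hdev, bdaddr, addr_type, HCI_ROLE_MASTER) != NULL;
}
#endif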
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003280
Johan Hedberg970c4e42014-02-18 10:19:33 +02003281struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3282{
3283 struct smp_irk *irk;
3284
Johan Hedbergadae20c2014-11-13 14:37:48 +02003285 rcu_read_lock();
3286 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3287 if (!bacmp(&irk->rpa, rpa)) {
3288 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003289 return irk;
3290 }
3291 }
3292
Johan Hedbergadae20c2014-11-13 14:37:48 +02003293 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3294 if (smp_irk_matches(hdev, irk->val, rpa)) {
3295 bacpy(&irk->rpa, rpa);
3296 rcu_read_unlock();
3297 return irk;
3298 }
3299 }
3300 rcu_read_unlock();
3301
Johan Hedberg970c4e42014-02-18 10:19:33 +02003302 return NULL;
3303}
3304
3305struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3306 u8 addr_type)
3307{
3308 struct smp_irk *irk;
3309
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003310 /* Identity Address must be public or static random */
3311 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3312 return NULL;
3313
Johan Hedbergadae20c2014-11-13 14:37:48 +02003314 rcu_read_lock();
3315 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003316 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003317 bacmp(bdaddr, &irk->bdaddr) == 0) {
3318 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003319 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003320 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003321 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003322 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003323
3324 return NULL;
3325}
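/* A short note on the address check above: bdaddr_t stores the address
 * little-endian, so b[5] is the most significant byte. Static random
 * addresses must have the two top bits set (11xxxxxx), hence the
 * (b[5] & 0xc0) == 0xc0 test; a resolvable private address (01xxxxxx)
 * or a non-resolvable private address (00xxxxxx) is rejected as an
 * identity address.
 */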
3326
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003327struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003328 bdaddr_t *bdaddr, u8 *val, u8 type,
3329 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003330{
3331 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303332 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003333
3334 old_key = hci_find_link_key(hdev, bdaddr);
3335 if (old_key) {
3336 old_key_type = old_key->type;
3337 key = old_key;
3338 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003339 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003340 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003341 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003342 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02003343 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003344 }
3345
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003346 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003347
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003348 /* Some buggy controller combinations generate a changed
3349 * combination key for legacy pairing even when there's no
3350 * previous key */
3351 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003352 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003353 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003354 if (conn)
3355 conn->key_type = type;
3356 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003357
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003358 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003359 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003360 key->pin_len = pin_len;
3361
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003362 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003363 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003364 else
3365 key->type = type;
3366
Johan Hedberg7652ff62014-06-24 13:15:49 +03003367 if (persistent)
3368 *persistent = hci_persistent_key(hdev, conn, type,
3369 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003370
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003371 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003372}
3373
Johan Hedbergca9142b2014-02-19 14:57:44 +02003374struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003375 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003376 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003377{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003378 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003379 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003380
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003381 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003382 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003383 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003384 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003385 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003386 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003387 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003388 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003389 }
3390
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003391 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003392 key->bdaddr_type = addr_type;
3393 memcpy(key->val, tk, sizeof(key->val));
3394 key->authenticated = authenticated;
3395 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003396 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003397 key->enc_size = enc_size;
3398 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003399
Johan Hedbergca9142b2014-02-19 14:57:44 +02003400 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003401}
3402
Johan Hedbergca9142b2014-02-19 14:57:44 +02003403struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3404 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003405{
3406 struct smp_irk *irk;
3407
3408 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3409 if (!irk) {
3410 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3411 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003412 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003413
3414 bacpy(&irk->bdaddr, bdaddr);
3415 irk->addr_type = addr_type;
3416
Johan Hedbergadae20c2014-11-13 14:37:48 +02003417 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003418 }
3419
3420 memcpy(irk->val, val, 16);
3421 bacpy(&irk->rpa, rpa);
3422
Johan Hedbergca9142b2014-02-19 14:57:44 +02003423 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003424}
3425
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003426int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3427{
3428 struct link_key *key;
3429
3430 key = hci_find_link_key(hdev, bdaddr);
3431 if (!key)
3432 return -ENOENT;
3433
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003434 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003435
Johan Hedberg0378b592014-11-19 15:22:22 +02003436 list_del_rcu(&key->list);
3437 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003438
3439 return 0;
3440}
3441
Johan Hedberge0b2b272014-02-18 17:14:31 +02003442int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003443{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003444 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003445 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003446
Johan Hedberg970d0f12014-11-13 14:37:47 +02003447 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003448 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003449 continue;
3450
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003451 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003452
Johan Hedberg970d0f12014-11-13 14:37:47 +02003453 list_del_rcu(&k->list);
3454 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003455 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003456 }
3457
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003458 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003459}
3460
Johan Hedberga7ec7332014-02-18 17:14:35 +02003461void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3462{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003463 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003464
Johan Hedbergadae20c2014-11-13 14:37:48 +02003465 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003466 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3467 continue;
3468
3469 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3470
Johan Hedbergadae20c2014-11-13 14:37:48 +02003471 list_del_rcu(&k->list);
3472 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003473 }
3474}
3475
Ville Tervo6bd32322011-02-16 16:32:41 +02003476/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003477static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003478{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003479 struct hci_dev *hdev = container_of(work, struct hci_dev,
3480 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003481
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003482 if (hdev->sent_cmd) {
3483 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3484 u16 opcode = __le16_to_cpu(sent->opcode);
3485
3486 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3487 } else {
3488 BT_ERR("%s command tx timeout", hdev->name);
3489 }
3490
Ville Tervo6bd32322011-02-16 16:32:41 +02003491 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003492 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003493}
3494
Szymon Janc2763eda2011-03-22 13:12:22 +01003495struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003496 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003497{
3498 struct oob_data *data;
3499
Johan Hedberg6928a922014-10-26 20:46:09 +01003500 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3501 if (bacmp(bdaddr, &data->bdaddr) != 0)
3502 continue;
3503 if (data->bdaddr_type != bdaddr_type)
3504 continue;
3505 return data;
3506 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003507
3508 return NULL;
3509}
3510
Johan Hedberg6928a922014-10-26 20:46:09 +01003511int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3512 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003513{
3514 struct oob_data *data;
3515
Johan Hedberg6928a922014-10-26 20:46:09 +01003516 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003517 if (!data)
3518 return -ENOENT;
3519
Johan Hedberg6928a922014-10-26 20:46:09 +01003520 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003521
3522 list_del(&data->list);
3523 kfree(data);
3524
3525 return 0;
3526}
3527
Johan Hedberg35f74982014-02-18 17:14:32 +02003528void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003529{
3530 struct oob_data *data, *n;
3531
3532 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3533 list_del(&data->list);
3534 kfree(data);
3535 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003536}
3537
Marcel Holtmann07988722014-01-10 02:07:29 -08003538int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01003539 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003540 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01003541{
3542 struct oob_data *data;
3543
Johan Hedberg6928a922014-10-26 20:46:09 +01003544 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003545 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003546 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003547 if (!data)
3548 return -ENOMEM;
3549
3550 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01003551 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01003552 list_add(&data->list, &hdev->remote_oob_data);
3553 }
3554
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003555 if (hash192 && rand192) {
3556 memcpy(data->hash192, hash192, sizeof(data->hash192));
3557 memcpy(data->rand192, rand192, sizeof(data->rand192));
3558 } else {
3559 memset(data->hash192, 0, sizeof(data->hash192));
3560 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003561 }
3562
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003563 if (hash256 && rand256) {
3564 memcpy(data->hash256, hash256, sizeof(data->hash256));
3565 memcpy(data->rand256, rand256, sizeof(data->rand256));
3566 } else {
3567 memset(data->hash256, 0, sizeof(data->hash256));
3568 memset(data->rand256, 0, sizeof(data->rand256));
3569 }
Marcel Holtmann07988722014-01-10 02:07:29 -08003570
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003571 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003572
3573 return 0;
3574}
3575
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003576struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003577 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003578{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003579 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003580
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003581 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003582 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003583 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003584 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003585
3586 return NULL;
3587}
3588
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003589void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003590{
3591 struct list_head *p, *n;
3592
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003593 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003594 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003595
3596 list_del(p);
3597 kfree(b);
3598 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003599}
3600
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003601int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003602{
3603 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003604
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003605 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003606 return -EBADF;
3607
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003608 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003609 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003610
Johan Hedberg27f70f32014-07-21 10:50:06 +03003611 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003612 if (!entry)
3613 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003614
3615 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003616 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003617
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003618 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003619
3620 return 0;
3621}
3622
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003623int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003624{
3625 struct bdaddr_list *entry;
3626
Johan Hedberg35f74982014-02-18 17:14:32 +02003627 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003628 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003629 return 0;
3630 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003631
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003632 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003633 if (!entry)
3634 return -ENOENT;
3635
3636 list_del(&entry->list);
3637 kfree(entry);
3638
3639 return 0;
3640}
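/* Illustrative usage sketch (hypothetical helper, not in the original
 * file): adding a BR/EDR device to the per-controller whitelist using
 * the helpers above. As elsewhere in this file, the caller is expected
 * to hold hdev->lock.
 */
#if 0
static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	if (hci_bdaddr_list_lookup(&hdev->whitelist, bdaddr, BDADDR_BREDR))
		return 0;

	return hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
}
#endif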
3641
Andre Guedes15819a72014-02-03 13:56:18 -03003642/* This function requires that the caller holds hdev->lock */
3643struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3644 bdaddr_t *addr, u8 addr_type)
3645{
3646 struct hci_conn_params *params;
3647
Johan Hedberg738f6182014-07-03 19:33:51 +03003648 /* The conn params list only contains identity addresses */
3649 if (!hci_is_identity_address(addr, addr_type))
3650 return NULL;
3651
Andre Guedes15819a72014-02-03 13:56:18 -03003652 list_for_each_entry(params, &hdev->le_conn_params, list) {
3653 if (bacmp(&params->addr, addr) == 0 &&
3654 params->addr_type == addr_type) {
3655 return params;
3656 }
3657 }
3658
3659 return NULL;
3660}
3661
Andre Guedescef952c2014-02-26 20:21:49 -03003662static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3663{
3664 struct hci_conn *conn;
3665
3666 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3667 if (!conn)
3668 return false;
3669
3670 if (conn->dst_type != type)
3671 return false;
3672
3673 if (conn->state != BT_CONNECTED)
3674 return false;
3675
3676 return true;
3677}
3678
Andre Guedes15819a72014-02-03 13:56:18 -03003679/* This function requires that the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003680struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3681 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003682{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003683 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003684
Johan Hedberg738f6182014-07-03 19:33:51 +03003685 /* The list only contains identity addresses */
3686 if (!hci_is_identity_address(addr, addr_type))
3687 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003688
Johan Hedberg501f8822014-07-04 12:37:26 +03003689 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003690 if (bacmp(&param->addr, addr) == 0 &&
3691 param->addr_type == addr_type)
3692 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003693 }
3694
3695 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003696}
3697
3698/* This function requires that the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003699struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3700 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003701{
3702 struct hci_conn_params *params;
3703
Johan Hedbergc46245b2014-07-02 17:37:33 +03003704 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003705 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003706
Andre Guedes15819a72014-02-03 13:56:18 -03003707 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003708 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003709 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003710
3711 params = kzalloc(sizeof(*params), GFP_KERNEL);
3712 if (!params) {
3713 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003714 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003715 }
3716
3717 bacpy(&params->addr, addr);
3718 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003719
3720 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003721 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003722
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003723 params->conn_min_interval = hdev->le_conn_min_interval;
3724 params->conn_max_interval = hdev->le_conn_max_interval;
3725 params->conn_latency = hdev->le_conn_latency;
3726 params->supervision_timeout = hdev->le_supv_timeout;
3727 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3728
3729 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3730
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003731 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003732}
3733
3734/* This function requires that the caller holds hdev->lock */
3735int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003736 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003737{
3738 struct hci_conn_params *params;
3739
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003740 params = hci_conn_params_add(hdev, addr, addr_type);
3741 if (!params)
3742 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003743
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003744 if (params->auto_connect == auto_connect)
3745 return 0;
3746
Johan Hedberg95305ba2014-07-04 12:37:21 +03003747 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003748
Andre Guedescef952c2014-02-26 20:21:49 -03003749 switch (auto_connect) {
3750 case HCI_AUTO_CONN_DISABLED:
3751 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003752 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003753 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003754 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003755 list_add(&params->action, &hdev->pend_le_reports);
3756 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003757 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003758 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003759 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003760 if (!is_connected(hdev, addr, addr_type)) {
3761 list_add(&params->action, &hdev->pend_le_conns);
3762 hci_update_background_scan(hdev);
3763 }
Andre Guedescef952c2014-02-26 20:21:49 -03003764 break;
3765 }
Andre Guedes15819a72014-02-03 13:56:18 -03003766
Johan Hedberg851efca2014-07-02 22:42:00 +03003767 params->auto_connect = auto_connect;
3768
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003769 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3770 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003771
3772 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003773}
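/* Illustrative usage sketch (hypothetical helper, not in the original
 * file): requesting that the kernel autonomously reconnects to a bonded
 * LE peer. Must run under hdev->lock, like the mgmt code that normally
 * calls this API.
 */
#if 0
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr)
{
	return hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				   HCI_AUTO_CONN_ALWAYS);
}
#endif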
3774
Johan Hedbergf6c63242014-08-15 21:06:59 +03003775static void hci_conn_params_free(struct hci_conn_params *params)
3776{
3777 if (params->conn) {
3778 hci_conn_drop(params->conn);
3779 hci_conn_put(params->conn);
3780 }
3781
3782 list_del(&params->action);
3783 list_del(&params->list);
3784 kfree(params);
3785}
3786
Andre Guedes15819a72014-02-03 13:56:18 -03003787/* This function requires that the caller holds hdev->lock */
3788void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3789{
3790 struct hci_conn_params *params;
3791
3792 params = hci_conn_params_lookup(hdev, addr, addr_type);
3793 if (!params)
3794 return;
3795
Johan Hedbergf6c63242014-08-15 21:06:59 +03003796 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003797
Johan Hedberg95305ba2014-07-04 12:37:21 +03003798 hci_update_background_scan(hdev);
3799
Andre Guedes15819a72014-02-03 13:56:18 -03003800 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3801}
3802
3803/* This function requires that the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003804void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003805{
3806 struct hci_conn_params *params, *tmp;
3807
3808 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003809 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3810 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003811 list_del(&params->list);
3812 kfree(params);
3813 }
3814
Johan Hedberg55af49a82014-07-02 17:37:26 +03003815 BT_DBG("All LE disabled connection parameters were removed");
3816}
3817
3818/* This function requires that the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003819void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003820{
3821 struct hci_conn_params *params, *tmp;
3822
Johan Hedbergf6c63242014-08-15 21:06:59 +03003823 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3824 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003825
Johan Hedberga2f41a82014-07-04 12:37:19 +03003826 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003827
Andre Guedes15819a72014-02-03 13:56:18 -03003828 BT_DBG("All LE connection parameters were removed");
3829}
3830
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003831static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003832{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003833 if (status) {
3834 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003835
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003836 hci_dev_lock(hdev);
3837 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838 hci_dev_unlock(hdev);
3839 return;
3840 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003841}
3842
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003843static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003844{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003845 /* General inquiry access code (GIAC) */
3846 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3847 struct hci_request req;
3848 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003849 int err;
3850
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003851 if (status) {
3852 BT_ERR("Failed to disable LE scanning: status %d", status);
3853 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003854 }
3855
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003856 switch (hdev->discovery.type) {
3857 case DISCOV_TYPE_LE:
3858 hci_dev_lock(hdev);
3859 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3860 hci_dev_unlock(hdev);
3861 break;
3862
3863 case DISCOV_TYPE_INTERLEAVED:
3864 hci_req_init(&req, hdev);
3865
3866 memset(&cp, 0, sizeof(cp));
3867 memcpy(&cp.lap, lap, sizeof(cp.lap));
3868 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3869 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3870
3871 hci_dev_lock(hdev);
3872
3873 hci_inquiry_cache_flush(hdev);
3874
3875 err = hci_req_run(&req, inquiry_complete);
3876 if (err) {
3877 BT_ERR("Inquiry request failed: err %d", err);
3878 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3879 }
3880
3881 hci_dev_unlock(hdev);
3882 break;
3883 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003884}
3885
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003886static void le_scan_disable_work(struct work_struct *work)
3887{
3888 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003889 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003890 struct hci_request req;
3891 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003892
3893 BT_DBG("%s", hdev->name);
3894
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003895 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003896
Andre Guedesb1efcc22014-02-26 20:21:40 -03003897 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003898
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003899 err = hci_req_run(&req, le_scan_disable_work_complete);
3900 if (err)
3901 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003902}
3903
Johan Hedberg8d972502014-02-28 12:54:14 +02003904static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3905{
3906 struct hci_dev *hdev = req->hdev;
3907
3908 /* If we're advertising or initiating an LE connection we can't
3909 * go ahead and change the random address at this time. This is
3910 * because the eventual initiator address used for the
3911 * subsequently created connection will be undefined (some
3912 * controllers use the new address and others the one we had
3913 * when the operation started).
3914 *
3915 * In this kind of scenario skip the update and let the random
3916 * address be updated at the next cycle.
3917 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003918 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003919 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3920 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003921 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003922 return;
3923 }
3924
3925 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3926}
3927
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003928int hci_update_random_address(struct hci_request *req, bool require_privacy,
3929 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003930{
3931 struct hci_dev *hdev = req->hdev;
3932 int err;
3933
3934	/* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003935	 * the current RPA has expired or something other than the
3936	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003937 */
3938 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003939 int to;
3940
3941 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942
3943 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003944 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003945 return 0;
3946
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003947 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003948 if (err < 0) {
3949 BT_ERR("%s failed to generate new RPA", hdev->name);
3950 return err;
3951 }
3952
Johan Hedberg8d972502014-02-28 12:54:14 +02003953 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003954
3955 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3956 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3957
3958 return 0;
3959 }
3960
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003961	/* If privacy is required but a resolvable private address is
3962	 * not available, use a non-resolvable private address. This is
3963	 * useful for active scanning and non-connectable advertising.
3964 */
3965 if (require_privacy) {
3966 bdaddr_t urpa;
3967
3968 get_random_bytes(&urpa, 6);
3969 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3970
3971 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003972 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003973 return 0;
3974 }
3975
Johan Hedbergebd3a742014-02-23 19:42:21 +02003976	/* If the forced static address is in use or there is no public
3977	 * address, use the static address as the random address (but
3978	 * skip the HCI command if the current random address is
3979	 * already the static one).
3980 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003981 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003982 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3983 *own_addr_type = ADDR_LE_DEV_RANDOM;
3984 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3985 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3986 &hdev->static_addr);
3987 return 0;
3988 }
3989
3990 /* Neither privacy nor static address is being used so use a
3991 * public address.
3992 */
3993 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3994
3995 return 0;
3996}
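/* Summary of the priority order implemented above (derived from this
 * code):
 *
 *   1. HCI_PRIVACY set           -> resolvable private address (RPA),
 *                                   regenerated when rpa_timeout expires
 *   2. require_privacy           -> freshly generated non-resolvable
 *                                   private address
 *   3. forced static address or
 *      no public address         -> static random address
 *   4. otherwise                 -> public address
 */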
3997
Johan Hedberga1f4c312014-02-27 14:05:41 +02003998/* Copy the Identity Address of the controller.
3999 *
4000 * If the controller has a public BD_ADDR, then by default use that one.
4001 * If this is an LE-only controller without a public address, default to
4002 * the static random address.
4003 *
4004 * For debugging purposes it is possible to force controllers with a
4005 * public address to use the static random address instead.
4006 */
4007void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4008 u8 *bdaddr_type)
4009{
Marcel Holtmann111902f2014-06-21 04:53:17 +02004010 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02004011 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4012 bacpy(bdaddr, &hdev->static_addr);
4013 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4014 } else {
4015 bacpy(bdaddr, &hdev->bdaddr);
4016 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4017 }
4018}
4019
David Herrmann9be0dab2012-04-22 14:39:57 +02004020/* Alloc HCI device */
4021struct hci_dev *hci_alloc_dev(void)
4022{
4023 struct hci_dev *hdev;
4024
Johan Hedberg27f70f32014-07-21 10:50:06 +03004025 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02004026 if (!hdev)
4027 return NULL;
4028
David Herrmannb1b813d2012-04-22 14:39:58 +02004029 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4030 hdev->esco_type = (ESCO_HV1);
4031 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07004032 hdev->num_iac = 0x01; /* One IAC support is mandatory */
4033 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02004034 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01004035 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4036 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02004037
David Herrmannb1b813d2012-04-22 14:39:58 +02004038 hdev->sniff_max_interval = 800;
4039 hdev->sniff_min_interval = 80;
4040
Marcel Holtmann3f959d42014-02-20 11:55:56 -08004041 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02004042 hdev->le_adv_min_interval = 0x0800;
4043 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004044 hdev->le_scan_interval = 0x0060;
4045 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004046 hdev->le_conn_min_interval = 0x0028;
4047 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004048 hdev->le_conn_latency = 0x0000;
4049 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004050
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004051 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004052 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004053 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4054 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004055
David Herrmannb1b813d2012-04-22 14:39:58 +02004056 mutex_init(&hdev->lock);
4057 mutex_init(&hdev->req_lock);
4058
4059 INIT_LIST_HEAD(&hdev->mgmt_pending);
4060 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004061 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004062 INIT_LIST_HEAD(&hdev->uuids);
4063 INIT_LIST_HEAD(&hdev->link_keys);
4064 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004065 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004066 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004067 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004068 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004069 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004070 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004071 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004072
4073 INIT_WORK(&hdev->rx_work, hci_rx_work);
4074 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4075 INIT_WORK(&hdev->tx_work, hci_tx_work);
4076 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004077
David Herrmannb1b813d2012-04-22 14:39:58 +02004078 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4079 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4080 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4081
David Herrmannb1b813d2012-04-22 14:39:58 +02004082 skb_queue_head_init(&hdev->rx_q);
4083 skb_queue_head_init(&hdev->cmd_q);
4084 skb_queue_head_init(&hdev->raw_q);
4085
4086 init_waitqueue_head(&hdev->req_wait_q);
4087
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004088 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004089
David Herrmannb1b813d2012-04-22 14:39:58 +02004090 hci_init_sysfs(hdev);
4091 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004092
4093 return hdev;
4094}
4095EXPORT_SYMBOL(hci_alloc_dev);
4096
4097/* Free HCI device */
4098void hci_free_dev(struct hci_dev *hdev)
4099{
David Herrmann9be0dab2012-04-22 14:39:57 +02004100 /* will free via device release */
4101 put_device(&hdev->dev);
4102}
4103EXPORT_SYMBOL(hci_free_dev);
4104
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105/* Register HCI device */
4106int hci_register_dev(struct hci_dev *hdev)
4107{
David Herrmannb1b813d2012-04-22 14:39:58 +02004108 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109
Marcel Holtmann74292d52014-07-06 15:50:27 +02004110 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111 return -EINVAL;
4112
Mat Martineau08add512011-11-02 16:18:36 -07004113 /* Do not allow HCI_AMP devices to register at index 0,
4114 * so the index can be used as the AMP controller ID.
4115 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004116 switch (hdev->dev_type) {
4117 case HCI_BREDR:
4118 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4119 break;
4120 case HCI_AMP:
4121 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4122 break;
4123 default:
4124 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004126
Sasha Levin3df92b32012-05-27 22:36:56 +02004127 if (id < 0)
4128 return id;
4129
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130 sprintf(hdev->name, "hci%d", id);
4131 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004132
4133 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4134
Kees Cookd8537542013-07-03 15:04:57 -07004135 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4136 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004137 if (!hdev->workqueue) {
4138 error = -ENOMEM;
4139 goto err;
4140 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004141
Kees Cookd8537542013-07-03 15:04:57 -07004142 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4143 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004144 if (!hdev->req_workqueue) {
4145 destroy_workqueue(hdev->workqueue);
4146 error = -ENOMEM;
4147 goto err;
4148 }
4149
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004150 if (!IS_ERR_OR_NULL(bt_debugfs))
4151 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4152
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004153 dev_set_name(&hdev->dev, "%s", hdev->name);
4154
4155 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004156 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004157 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004159 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004160 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4161 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004162 if (hdev->rfkill) {
4163 if (rfkill_register(hdev->rfkill) < 0) {
4164 rfkill_destroy(hdev->rfkill);
4165 hdev->rfkill = NULL;
4166 }
4167 }
4168
Johan Hedberg5e130362013-09-13 08:58:17 +03004169 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4170 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4171
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004172 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004173 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004174
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004175 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004176 /* Assume BR/EDR support until proven otherwise (such as
4177 * through reading supported features during init.
4178 */
4179 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4180 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004181
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004182 write_lock(&hci_dev_list_lock);
4183 list_add(&hdev->list, &hci_dev_list);
4184 write_unlock(&hci_dev_list_lock);
4185
Marcel Holtmann4a964402014-07-02 19:10:33 +02004186 /* Devices that are marked for raw-only usage are unconfigured
4187 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004188 */
4189 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004190 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004191
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004193 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194
Johan Hedberg19202572013-01-14 22:33:51 +02004195 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004196
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004198
David Herrmann33ca9542011-10-08 14:58:49 +02004199err_wqueue:
4200 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004201 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004202err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004203 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004204
David Herrmann33ca9542011-10-08 14:58:49 +02004205 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206}
4207EXPORT_SYMBOL(hci_register_dev);
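/* Hedged sketch, not from this file: the minimal contract a transport
 * driver must satisfy before hci_register_dev() can succeed, since the
 * check above rejects devices lacking open, close and send callbacks.
 * All foo_* identifiers are hypothetical.
 */
#if 0
static int foo_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->open  = foo_open;		/* bring the transport up */
	hdev->close = foo_close;	/* shut the transport down */
	hdev->send  = foo_send;		/* push one skb to the hardware */

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif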
4208
4209/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004210void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211{
Sasha Levin3df92b32012-05-27 22:36:56 +02004212 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004213
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004214 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215
Johan Hovold94324962012-03-15 14:48:41 +01004216 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4217
Sasha Levin3df92b32012-05-27 22:36:56 +02004218 id = hdev->id;
4219
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004220 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004222 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223
4224 hci_dev_do_close(hdev);
4225
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304226 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004227 kfree_skb(hdev->reassembly[i]);
4228
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004229 cancel_work_sync(&hdev->power_on);
4230
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004231 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004232 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4233 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004234 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004235 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004236 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004237 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004238
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004239 /* mgmt_index_removed should take care of emptying the
4240 * pending list */
4241 BUG_ON(!list_empty(&hdev->mgmt_pending));
4242
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 hci_notify(hdev, HCI_DEV_UNREG);
4244
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004245 if (hdev->rfkill) {
4246 rfkill_unregister(hdev->rfkill);
4247 rfkill_destroy(hdev->rfkill);
4248 }
4249
Johan Hedberg711eafe2014-08-08 09:32:52 +03004250 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004251
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004252 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004253
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004254 debugfs_remove_recursive(hdev->debugfs);
4255
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004256 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004257 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004258
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004259 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004260 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004261 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004262 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004263 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004264 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004265 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004266 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004267 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004268 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01004269 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004270 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004271
David Herrmanndc946bd2012-01-07 15:47:24 +01004272 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004273
4274 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275}
4276EXPORT_SYMBOL(hci_unregister_dev);
4277
4278/* Suspend HCI device */
4279int hci_suspend_dev(struct hci_dev *hdev)
4280{
4281 hci_notify(hdev, HCI_DEV_SUSPEND);
4282 return 0;
4283}
4284EXPORT_SYMBOL(hci_suspend_dev);
4285
4286/* Resume HCI device */
4287int hci_resume_dev(struct hci_dev *hdev)
4288{
4289 hci_notify(hdev, HCI_DEV_RESUME);
4290 return 0;
4291}
4292EXPORT_SYMBOL(hci_resume_dev);
4293
Marcel Holtmann75e05692014-11-02 08:15:38 +01004294/* Reset HCI device */
4295int hci_reset_dev(struct hci_dev *hdev)
4296{
4297 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4298 struct sk_buff *skb;
4299
4300 skb = bt_skb_alloc(3, GFP_ATOMIC);
4301 if (!skb)
4302 return -ENOMEM;
4303
4304 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4305 memcpy(skb_put(skb, 3), hw_err, 3);
4306
4307 /* Send Hardware Error to upper stack */
4308 return hci_recv_frame(hdev, skb);
4309}
4310EXPORT_SYMBOL(hci_reset_dev);
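/* The injected frame above is a complete HCI event packet:
 *   byte 0: event code (HCI_EV_HARDWARE_ERROR)
 *   byte 1: parameter length (0x01)
 *   byte 2: hardware error code (0x00)
 * so the upper stack reacts exactly as if the controller itself had
 * reported a hardware error.
 */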
4311
Marcel Holtmann76bca882009-11-18 00:40:39 +01004312/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004313int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004314{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004315 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004316 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004317 kfree_skb(skb);
4318 return -ENXIO;
4319 }
4320
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004321 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004322 bt_cb(skb)->incoming = 1;
4323
4324 /* Time stamp */
4325 __net_timestamp(skb);
4326
Marcel Holtmann76bca882009-11-18 00:40:39 +01004327 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004328 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004329
Marcel Holtmann76bca882009-11-18 00:40:39 +01004330 return 0;
4331}
4332EXPORT_SYMBOL(hci_recv_frame);
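/* Hedged sketch (an assumption, not from this file): a transport driver
 * hands a received packet to the core by tagging its type and calling
 * hci_recv_frame(), which either queues it on rx_q or frees it.
 */
#if 0
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* or ACL/SCO data */
	err = hci_recv_frame(hdev, skb);	/* consumes skb, even on error */
#endif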
4333
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304334static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004335 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304336{
4337 int len = 0;
4338 int hlen = 0;
4339 int remain = count;
4340 struct sk_buff *skb;
4341 struct bt_skb_cb *scb;
4342
4343 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004344 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304345 return -EILSEQ;
4346
4347 skb = hdev->reassembly[index];
4348
4349 if (!skb) {
4350 switch (type) {
4351 case HCI_ACLDATA_PKT:
4352 len = HCI_MAX_FRAME_SIZE;
4353 hlen = HCI_ACL_HDR_SIZE;
4354 break;
4355 case HCI_EVENT_PKT:
4356 len = HCI_MAX_EVENT_SIZE;
4357 hlen = HCI_EVENT_HDR_SIZE;
4358 break;
4359 case HCI_SCODATA_PKT:
4360 len = HCI_MAX_SCO_SIZE;
4361 hlen = HCI_SCO_HDR_SIZE;
4362 break;
4363 }
4364
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004365 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304366 if (!skb)
4367 return -ENOMEM;
4368
4369 scb = (void *) skb->cb;
4370 scb->expect = hlen;
4371 scb->pkt_type = type;
4372
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304373 hdev->reassembly[index] = skb;
4374 }
4375
4376 while (count) {
4377 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004378 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304379
4380 memcpy(skb_put(skb, len), data, len);
4381
4382 count -= len;
4383 data += len;
4384 scb->expect -= len;
4385 remain = count;
4386
4387 switch (type) {
4388 case HCI_EVENT_PKT:
4389 if (skb->len == HCI_EVENT_HDR_SIZE) {
4390 struct hci_event_hdr *h = hci_event_hdr(skb);
4391 scb->expect = h->plen;
4392
4393 if (skb_tailroom(skb) < scb->expect) {
4394 kfree_skb(skb);
4395 hdev->reassembly[index] = NULL;
4396 return -ENOMEM;
4397 }
4398 }
4399 break;
4400
4401 case HCI_ACLDATA_PKT:
4402 if (skb->len == HCI_ACL_HDR_SIZE) {
4403 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4404 scb->expect = __le16_to_cpu(h->dlen);
4405
4406 if (skb_tailroom(skb) < scb->expect) {
4407 kfree_skb(skb);
4408 hdev->reassembly[index] = NULL;
4409 return -ENOMEM;
4410 }
4411 }
4412 break;
4413
4414 case HCI_SCODATA_PKT:
4415 if (skb->len == HCI_SCO_HDR_SIZE) {
4416 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4417 scb->expect = h->dlen;
4418
4419 if (skb_tailroom(skb) < scb->expect) {
4420 kfree_skb(skb);
4421 hdev->reassembly[index] = NULL;
4422 return -ENOMEM;
4423 }
4424 }
4425 break;
4426 }
4427
4428 if (scb->expect == 0) {
4429 /* Complete frame */
4430
4431 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004432 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304433
4434 hdev->reassembly[index] = NULL;
4435 return remain;
4436 }
4437 }
4438
4439 return remain;
4440}
4441
Suraj Sumangala99811512010-07-14 13:02:19 +05304442#define STREAM_REASSEMBLY 0
4443
4444int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4445{
4446 int type;
4447 int rem = 0;
4448
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004449 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304450 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4451
4452 if (!skb) {
4453 struct { char type; } *pkt;
4454
4455 /* Start of the frame */
4456 pkt = data;
4457 type = pkt->type;
4458
4459 data++;
4460 count--;
4461 } else
4462 type = bt_cb(skb)->pkt_type;
4463
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004464 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004465 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304466 if (rem < 0)
4467 return rem;
4468
4469 data += (count - rem);
4470 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004471 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304472
4473 return rem;
4474}
4475EXPORT_SYMBOL(hci_recv_stream_fragment);
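/* Hedged sketch: how a byte-stream transport (for example a UART-based
 * driver) might feed raw bytes into the reassembly machinery above.
 * The data/count variables are hypothetical.
 */
#if 0
	ret = hci_recv_stream_fragment(hdev, data, count);
	if (ret < 0)
		BT_ERR("Frame reassembly failed (%d)", ret);
#endif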
4476
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477/* ---- Interface to upper protocols ---- */
4478
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479int hci_register_cb(struct hci_cb *cb)
4480{
4481 BT_DBG("%p name %s", cb, cb->name);
4482
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004483 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004485 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486
4487 return 0;
4488}
4489EXPORT_SYMBOL(hci_register_cb);
4490
4491int hci_unregister_cb(struct hci_cb *cb)
4492{
4493 BT_DBG("%p name %s", cb, cb->name);
4494
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004495 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004497 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498
4499 return 0;
4500}
4501EXPORT_SYMBOL(hci_unregister_cb);
4502
Marcel Holtmann51086992013-10-10 14:54:19 -07004503static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004505 int err;
4506
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004507 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004509 /* Time stamp */
4510 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004512 /* Send copy to monitor */
4513 hci_send_to_monitor(hdev, skb);
4514
4515 if (atomic_read(&hdev->promisc)) {
4516 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004517 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518 }
4519
4520 /* Get rid of skb owner, prior to sending to the driver. */
4521 skb_orphan(skb);
4522
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004523 err = hdev->send(hdev, skb);
4524 if (err < 0) {
4525 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4526 kfree_skb(skb);
4527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528}
4529
Johan Hedberg3119ae92013-03-05 20:37:44 +02004530void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4531{
4532 skb_queue_head_init(&req->cmd_q);
4533 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004534 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004535}
4536
4537int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4538{
4539 struct hci_dev *hdev = req->hdev;
4540 struct sk_buff *skb;
4541 unsigned long flags;
4542
4543 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4544
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004545 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004546 * commands queued on the HCI request queue.
4547 */
4548 if (req->err) {
4549 skb_queue_purge(&req->cmd_q);
4550 return req->err;
4551 }
4552
Johan Hedberg3119ae92013-03-05 20:37:44 +02004553 /* Do not allow empty requests */
4554 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004555 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004556
4557 skb = skb_peek_tail(&req->cmd_q);
4558 bt_cb(skb)->req.complete = complete;
4559
4560 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4561 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4562 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4563
4564 queue_work(hdev->workqueue, &hdev->cmd_work);
4565
4566 return 0;
4567}
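/* Hedged sketch: the typical init/add/run sequence for an asynchronous
 * HCI request, using only helpers defined in this file. The
 * example_complete callback is an assumption.
 */
#if 0
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	err = hci_req_run(&req, example_complete);
#endif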
4568
Marcel Holtmann899de762014-07-11 05:51:58 +02004569bool hci_req_pending(struct hci_dev *hdev)
4570{
4571 return (hdev->req_status == HCI_REQ_PEND);
4572}
4573
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004574static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004575 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576{
4577 int len = HCI_COMMAND_HDR_SIZE + plen;
4578 struct hci_command_hdr *hdr;
4579 struct sk_buff *skb;
4580
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004582 if (!skb)
4583 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584
4585 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004586 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587 hdr->plen = plen;
4588
4589 if (plen)
4590 memcpy(skb_put(skb, plen), param, plen);
4591
4592 BT_DBG("skb len %d", skb->len);
4593
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004594 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004595 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004596
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004597 return skb;
4598}
4599
4600/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004601int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4602 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004603{
4604 struct sk_buff *skb;
4605
4606 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4607
4608 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4609 if (!skb) {
4610 BT_ERR("%s no memory for command", hdev->name);
4611 return -ENOMEM;
4612 }
4613
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004614 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004615 * single-command requests.
4616 */
4617 bt_cb(skb)->req.start = true;
4618
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004620 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621
4622 return 0;
4623}
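/* For example, a caller could issue a bare controller reset with
 * hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); the command is queued on
 * hdev->cmd_q and sent from the command work item.
 */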
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624
Johan Hedberg71c76a12013-03-05 20:37:46 +02004625/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004626void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4627 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004628{
4629 struct hci_dev *hdev = req->hdev;
4630 struct sk_buff *skb;
4631
4632 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4633
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004634 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004635 * queueing the HCI command. We can simply return.
4636 */
4637 if (req->err)
4638 return;
4639
Johan Hedberg71c76a12013-03-05 20:37:46 +02004640 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4641 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004642 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4643 hdev->name, opcode);
4644 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004645 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004646 }
4647
4648 if (skb_queue_empty(&req->cmd_q))
4649 bt_cb(skb)->req.start = true;
4650
Johan Hedberg02350a72013-04-03 21:50:29 +03004651 bt_cb(skb)->req.event = event;
4652
Johan Hedberg71c76a12013-03-05 20:37:46 +02004653 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004654}
4655
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004656void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4657 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004658{
4659 hci_req_add_ev(req, opcode, plen, param, 0);
4660}
4661
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004663void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664{
4665 struct hci_command_hdr *hdr;
4666
4667 if (!hdev->sent_cmd)
4668 return NULL;
4669
4670 hdr = (void *) hdev->sent_cmd->data;
4671
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004672 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 return NULL;
4674
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004675 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676
4677 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4678}
4679
4680/* Send ACL data */
4681static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4682{
4683 struct hci_acl_hdr *hdr;
4684 int len = skb->len;
4685
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004686 skb_push(skb, HCI_ACL_HDR_SIZE);
4687 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004688 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004689 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4690 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004691}
4692
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004693static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004694 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004696 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 struct hci_dev *hdev = conn->hdev;
4698 struct sk_buff *list;
4699
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004700 skb->len = skb_headlen(skb);
4701 skb->data_len = 0;
4702
4703 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004704
4705 switch (hdev->dev_type) {
4706 case HCI_BREDR:
4707 hci_add_acl_hdr(skb, conn->handle, flags);
4708 break;
4709 case HCI_AMP:
4710 hci_add_acl_hdr(skb, chan->handle, flags);
4711 break;
4712 default:
4713 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4714 return;
4715 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004716
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004717 list = skb_shinfo(skb)->frag_list;
4718 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719		/* Non-fragmented */
4720 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4721
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004722 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723 } else {
4724 /* Fragmented */
4725 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4726
4727 skb_shinfo(skb)->frag_list = NULL;
4728
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004729 /* Queue all fragments atomically. We need to use spin_lock_bh
4730 * here because of 6LoWPAN links, as there this function is
4731 * called from softirq and using normal spin lock could cause
4732 * deadlocks.
4733 */
4734 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004736 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004737
4738 flags &= ~ACL_START;
4739 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 do {
4741 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004742
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004743 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004744 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
4746 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4747
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004748 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 } while (list);
4750
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004751 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004753}
4754
4755void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4756{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004757 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004758
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004759 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004760
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004761 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004763 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765
4766/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004767void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768{
4769 struct hci_dev *hdev = conn->hdev;
4770 struct hci_sco_hdr hdr;
4771
4772 BT_DBG("%s len %d", hdev->name, skb->len);
4773
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004774 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 hdr.dlen = skb->len;
4776
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004777 skb_push(skb, HCI_SCO_HDR_SIZE);
4778 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004779 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004780
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004781 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004782
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004784 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786
4787/* ---- HCI TX task (outgoing data) ---- */
4788
4789/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004790static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4791 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792{
4793 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004794 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004795 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004797	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004799
4800 rcu_read_lock();
4801
4802 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004803 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004805
4806 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4807 continue;
4808
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 num++;
4810
4811 if (c->sent < min) {
4812 min = c->sent;
4813 conn = c;
4814 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004815
4816 if (hci_conn_num(hdev, type) == num)
4817 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818 }
4819
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004820 rcu_read_unlock();
4821
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004823 int cnt, q;
4824
4825 switch (conn->type) {
4826 case ACL_LINK:
4827 cnt = hdev->acl_cnt;
4828 break;
4829 case SCO_LINK:
4830 case ESCO_LINK:
4831 cnt = hdev->sco_cnt;
4832 break;
4833 case LE_LINK:
4834 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4835 break;
4836 default:
4837 cnt = 0;
4838 BT_ERR("Unknown link type");
4839 }
4840
4841 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842 *quote = q ? q : 1;
4843 } else
4844 *quote = 0;
4845
4846 BT_DBG("conn %p quote %d", conn, *quote);
4847 return conn;
4848}
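/* Worked example for the quota above: with hdev->acl_cnt == 9 and three
 * ACL connections that have queued data, num == 3 and the selected
 * connection may send 9 / 3 == 3 packets in this scheduling round.
 */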
4849
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004850static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851{
4852 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004853 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854
Ville Tervobae1f5d92011-02-10 22:38:53 -03004855 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004857 rcu_read_lock();
4858
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004860 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004861 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004862 BT_ERR("%s killing stalled connection %pMR",
4863 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004864 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865 }
4866 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004867
4868 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869}
4870
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004871static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4872 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004873{
4874 struct hci_conn_hash *h = &hdev->conn_hash;
4875 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004876 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004877 struct hci_conn *conn;
4878 int cnt, q, conn_num = 0;
4879
4880 BT_DBG("%s", hdev->name);
4881
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004882 rcu_read_lock();
4883
4884 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004885 struct hci_chan *tmp;
4886
4887 if (conn->type != type)
4888 continue;
4889
4890 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4891 continue;
4892
4893 conn_num++;
4894
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004895 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004896 struct sk_buff *skb;
4897
4898 if (skb_queue_empty(&tmp->data_q))
4899 continue;
4900
4901 skb = skb_peek(&tmp->data_q);
4902 if (skb->priority < cur_prio)
4903 continue;
4904
4905 if (skb->priority > cur_prio) {
4906 num = 0;
4907 min = ~0;
4908 cur_prio = skb->priority;
4909 }
4910
4911 num++;
4912
4913 if (conn->sent < min) {
4914 min = conn->sent;
4915 chan = tmp;
4916 }
4917 }
4918
4919 if (hci_conn_num(hdev, type) == conn_num)
4920 break;
4921 }
4922
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004923 rcu_read_unlock();
4924
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004925 if (!chan)
4926 return NULL;
4927
4928 switch (chan->conn->type) {
4929 case ACL_LINK:
4930 cnt = hdev->acl_cnt;
4931 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004932 case AMP_LINK:
4933 cnt = hdev->block_cnt;
4934 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004935 case SCO_LINK:
4936 case ESCO_LINK:
4937 cnt = hdev->sco_cnt;
4938 break;
4939 case LE_LINK:
4940 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4941 break;
4942 default:
4943 cnt = 0;
4944 BT_ERR("Unknown link type");
4945 }
4946
4947 q = cnt / num;
4948 *quote = q ? q : 1;
4949 BT_DBG("chan %p quote %d", chan, *quote);
4950 return chan;
4951}
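/* The selection above is two-level: the highest pending skb->priority
 * wins first, and among channels at that priority the connection with
 * the fewest outstanding packets (lowest conn->sent) is picked before
 * the quota is split as in hci_low_sent().
 */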
4952
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004953static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4954{
4955 struct hci_conn_hash *h = &hdev->conn_hash;
4956 struct hci_conn *conn;
4957 int num = 0;
4958
4959 BT_DBG("%s", hdev->name);
4960
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004961 rcu_read_lock();
4962
4963 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004964 struct hci_chan *chan;
4965
4966 if (conn->type != type)
4967 continue;
4968
4969 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4970 continue;
4971
4972 num++;
4973
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004974 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004975 struct sk_buff *skb;
4976
4977 if (chan->sent) {
4978 chan->sent = 0;
4979 continue;
4980 }
4981
4982 if (skb_queue_empty(&chan->data_q))
4983 continue;
4984
4985 skb = skb_peek(&chan->data_q);
4986 if (skb->priority >= HCI_PRIO_MAX - 1)
4987 continue;
4988
4989 skb->priority = HCI_PRIO_MAX - 1;
4990
4991 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004992 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004993 }
4994
4995 if (hci_conn_num(hdev, type) == num)
4996 break;
4997 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004998
4999 rcu_read_unlock();
5000
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005001}
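/* The pass above implements simple ageing: channels that transmitted in
 * the last round get their sent counter cleared, while head packets on
 * starved channels are promoted to HCI_PRIO_MAX - 1 so they cannot be
 * held back by higher-priority traffic indefinitely.
 */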
5002
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005003static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
5004{
5005 /* Calculate count of blocks used by this packet */
5006 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
5007}
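/* Worked example: with block_len == 64, a 339-byte ACL packet (335
 * payload bytes after the 4-byte ACL header) occupies
 * DIV_ROUND_UP(335, 64) == 6 controller buffer blocks.
 */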
5008
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005009static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010{
Marcel Holtmann4a964402014-07-02 19:10:33 +02005011 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012 /* ACL tx timeout must be longer than maximum
5013 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005014 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005015 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005016 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005018}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005020static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005021{
5022 unsigned int cnt = hdev->acl_cnt;
5023 struct hci_chan *chan;
5024 struct sk_buff *skb;
5025 int quote;
5026
5027 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005028
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005029 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005030 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005031 u32 priority = (skb_peek(&chan->data_q))->priority;
5032 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005033 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005034 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005035
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005036 /* Stop if priority has changed */
5037 if (skb->priority < priority)
5038 break;
5039
5040 skb = skb_dequeue(&chan->data_q);
5041
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005042 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03005043 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005044
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005045 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 hdev->acl_last_tx = jiffies;
5047
5048 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005049 chan->sent++;
5050 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051 }
5052 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005053
5054 if (cnt != hdev->acl_cnt)
5055 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056}
5057
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005058static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005059{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005060 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005061 struct hci_chan *chan;
5062 struct sk_buff *skb;
5063 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005064 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005065
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005066 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005067
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005068 BT_DBG("%s", hdev->name);
5069
5070 if (hdev->dev_type == HCI_AMP)
5071 type = AMP_LINK;
5072 else
5073 type = ACL_LINK;
5074
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005075 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005076 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005077 u32 priority = (skb_peek(&chan->data_q))->priority;
5078 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5079 int blocks;
5080
5081 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005082 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005083
5084 /* Stop if priority has changed */
5085 if (skb->priority < priority)
5086 break;
5087
5088 skb = skb_dequeue(&chan->data_q);
5089
5090 blocks = __get_blocks(hdev, skb);
5091 if (blocks > hdev->block_cnt)
5092 return;
5093
5094 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005095 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005096
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005097 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005098 hdev->acl_last_tx = jiffies;
5099
5100 hdev->block_cnt -= blocks;
5101 quote -= blocks;
5102
5103 chan->sent += blocks;
5104 chan->conn->sent += blocks;
5105 }
5106 }
5107
5108 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005109 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005110}
5111
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005112static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005113{
5114 BT_DBG("%s", hdev->name);
5115
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005116 /* No ACL link over BR/EDR controller */
5117 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5118 return;
5119
5120 /* No AMP link over AMP controller */
5121 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005122 return;
5123
5124 switch (hdev->flow_ctl_mode) {
5125 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5126 hci_sched_acl_pkt(hdev);
5127 break;
5128
5129 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5130 hci_sched_acl_blk(hdev);
5131 break;
5132 }
5133}
5134
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005136static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137{
5138 struct hci_conn *conn;
5139 struct sk_buff *skb;
5140 int quote;
5141
5142 BT_DBG("%s", hdev->name);
5143
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005144 if (!hci_conn_num(hdev, SCO_LINK))
5145 return;
5146
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5148 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5149 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005150 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151
5152 conn->sent++;
5153 if (conn->sent == ~0)
5154 conn->sent = 0;
5155 }
5156 }
5157}
5158
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005159static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005160{
5161 struct hci_conn *conn;
5162 struct sk_buff *skb;
5163 int quote;
5164
5165 BT_DBG("%s", hdev->name);
5166
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005167 if (!hci_conn_num(hdev, ESCO_LINK))
5168 return;
5169
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005170 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5171 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005172 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5173 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005174 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005175
5176 conn->sent++;
5177 if (conn->sent == ~0)
5178 conn->sent = 0;
5179 }
5180 }
5181}
5182
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005183static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005184{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005185 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005186 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005187 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005188
5189 BT_DBG("%s", hdev->name);
5190
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005191 if (!hci_conn_num(hdev, LE_LINK))
5192 return;
5193
Marcel Holtmann4a964402014-07-02 19:10:33 +02005194 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005195 /* LE tx timeout must be longer than maximum
5196 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005197 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005198 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005199 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005200 }
5201
5202 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005203 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005204 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005205 u32 priority = (skb_peek(&chan->data_q))->priority;
5206 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005207 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005208 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005209
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005210 /* Stop if priority has changed */
5211 if (skb->priority < priority)
5212 break;
5213
5214 skb = skb_dequeue(&chan->data_q);
5215
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005216 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005217 hdev->le_last_tx = jiffies;
5218
5219 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005220 chan->sent++;
5221 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005222 }
5223 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005224
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005225 if (hdev->le_pkts)
5226 hdev->le_cnt = cnt;
5227 else
5228 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005229
5230 if (cnt != tmp)
5231 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005232}
5233
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005234static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005236 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237 struct sk_buff *skb;
5238
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005239 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005240 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241
Marcel Holtmann52de5992013-09-03 18:08:38 -07005242 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5243 /* Schedule queues and send stuff to HCI driver */
5244 hci_sched_acl(hdev);
5245 hci_sched_sco(hdev);
5246 hci_sched_esco(hdev);
5247 hci_sched_le(hdev);
5248 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005249
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250 /* Send next queued raw (unknown type) packet */
5251 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005252 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253}
5254
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005255/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256
5257/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005258static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259{
5260 struct hci_acl_hdr *hdr = (void *) skb->data;
5261 struct hci_conn *conn;
5262 __u16 handle, flags;
5263
5264 skb_pull(skb, HCI_ACL_HDR_SIZE);
5265
5266 handle = __le16_to_cpu(hdr->handle);
5267 flags = hci_flags(handle);
5268 handle = hci_handle(handle);
5269
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005270 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005271 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272
5273 hdev->stat.acl_rx++;
5274
5275 hci_dev_lock(hdev);
5276 conn = hci_conn_hash_lookup_handle(hdev, handle);
5277 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005278
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005280 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005281
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005283 l2cap_recv_acldata(conn, skb, flags);
5284 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005286 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005287 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288 }
5289
5290 kfree_skb(skb);
5291}
5292
5293/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005294static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295{
5296 struct hci_sco_hdr *hdr = (void *) skb->data;
5297 struct hci_conn *conn;
5298 __u16 handle;
5299
5300 skb_pull(skb, HCI_SCO_HDR_SIZE);
5301
5302 handle = __le16_to_cpu(hdr->handle);
5303
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005304 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305
5306 hdev->stat.sco_rx++;
5307
5308 hci_dev_lock(hdev);
5309 conn = hci_conn_hash_lookup_handle(hdev, handle);
5310 hci_dev_unlock(hdev);
5311
5312 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005314 sco_recv_scodata(conn, skb);
5315 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005317 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005318 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319 }
5320
5321 kfree_skb(skb);
5322}
5323
Johan Hedberg9238f362013-03-05 20:37:48 +02005324static bool hci_req_is_complete(struct hci_dev *hdev)
5325{
5326 struct sk_buff *skb;
5327
5328 skb = skb_peek(&hdev->cmd_q);
5329 if (!skb)
5330 return true;
5331
5332 return bt_cb(skb)->req.start;
5333}
5334
Johan Hedberg42c6b122013-03-05 20:37:49 +02005335static void hci_resend_last(struct hci_dev *hdev)
5336{
5337 struct hci_command_hdr *sent;
5338 struct sk_buff *skb;
5339 u16 opcode;
5340
5341 if (!hdev->sent_cmd)
5342 return;
5343
5344 sent = (void *) hdev->sent_cmd->data;
5345 opcode = __le16_to_cpu(sent->opcode);
5346 if (opcode == HCI_OP_RESET)
5347 return;
5348
5349 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5350 if (!skb)
5351 return;
5352
5353 skb_queue_head(&hdev->cmd_q, skb);
5354 queue_work(hdev->workqueue, &hdev->cmd_work);
5355}
5356
Johan Hedberg9238f362013-03-05 20:37:48 +02005357void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5358{
5359 hci_req_complete_t req_complete = NULL;
5360 struct sk_buff *skb;
5361 unsigned long flags;
5362
5363 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5364
Johan Hedberg42c6b122013-03-05 20:37:49 +02005365 /* If the completed command doesn't match the last one that was
 5366	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005367 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005368 if (!hci_sent_cmd_data(hdev, opcode)) {
 5369		/* Some CSR-based controllers generate a spontaneous
 5370		 * reset complete event during init, and any pending
5371 * command will never be completed. In such a case we
5372 * need to resend whatever was the last sent
5373 * command.
5374 */
5375 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5376 hci_resend_last(hdev);
5377
Johan Hedberg9238f362013-03-05 20:37:48 +02005378 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005379 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005380
 5381	/* If the command succeeded and there are still more commands in
 5382	 * this request, the request is not yet complete.
5383 */
5384 if (!status && !hci_req_is_complete(hdev))
5385 return;
5386
5387 /* If this was the last command in a request the complete
5388 * callback would be found in hdev->sent_cmd instead of the
5389 * command queue (hdev->cmd_q).
5390 */
5391 if (hdev->sent_cmd) {
5392 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005393
5394 if (req_complete) {
5395 /* We must set the complete callback to NULL to
5396 * avoid calling the callback more than once if
5397 * this function gets called again.
5398 */
5399 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5400
Johan Hedberg9238f362013-03-05 20:37:48 +02005401 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005402 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005403 }
5404
5405 /* Remove all pending commands belonging to this request */
5406 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5407 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5408 if (bt_cb(skb)->req.start) {
5409 __skb_queue_head(&hdev->cmd_q, skb);
5410 break;
5411 }
5412
5413 req_complete = bt_cb(skb)->req.complete;
5414 kfree_skb(skb);
5415 }
5416 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5417
5418call_complete:
5419 if (req_complete)
5420 req_complete(hdev, status);
5421}
5422
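
/* RX work handler: drains hdev->rx_q and passes each frame to the
 * monitor and raw sockets before dispatching it to the matching
 * packet handler. All frames are dropped after the socket copies
 * while the device is bound to a user channel, and data packets are
 * dropped while the device is still initializing.
 */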
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
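
/* TX command work handler: sends the next queued HCI command as soon
 * as the controller signals room for it (cmd_cnt > 0) and arms the
 * command timeout. If cloning the command fails, it is re-queued and
 * the work item is rescheduled.
 */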
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
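
/* Queue the LE Set Scan Enable command with scanning disabled onto
 * the given request.
 */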
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
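
/* Queue a command to add a single connection parameter entry to the
 * controller's white list.
 */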
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}
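
/* Synchronize the controller's white list with the pending
 * connections and pending reports lists. Returns the scan filter
 * policy to use: 0x01 if the white list can be used, or 0x00 if
 * scanning has to accept all advertising, either because the white
 * list is too small or because an entry uses a resolvable private
 * address.
 */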
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
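
/* Queue the commands needed to start LE passive scanning: pick an own
 * address type, bring the white list up to date, then set the scan
 * parameters and enable scanning with duplicate filtering.
 */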
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
		       status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we
 * start the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
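
/* Check whether any device on the BR/EDR whitelist currently lacks an
 * established ACL connection, in which case page scanning needs to
 * stay enabled so that the device can reconnect.
 */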
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}
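
/* Enable or disable page scan (and inquiry scan when discoverable)
 * based on the connectable setting and the whitelist state. The scan
 * enable command is queued on @req when one is given, otherwise it is
 * sent directly.
 */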
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}