/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
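/* Illustrative usage sketch (not part of the original file): assuming
 * hci_register_dev() creates this entry under
 * /sys/kernel/debug/bluetooth/hciN/, Device Under Test mode could be
 * toggled from userspace roughly like this:
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	N
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The write handler above sends HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET
 * when disabling) synchronously and only flips HCI_DUT_MODE on success.
 */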
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
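/* Worked example for the conversion above (illustrative, not from the
 * original file): the 16-bit Heart Rate service UUID 0x180d expands to
 * 0000180d-0000-1000-8000-00805f9b34fb. In hdev->uuids it is stored in
 * reversed byte order, i.e. fb 34 9b 5f 80 00 00 80 00 10 00 00 0d 18
 * 00 00, so the loop flips it back before handing it to %pUb.
 */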
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
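/* A note on the pattern above (illustrative, not from the original file):
 * DEFINE_SIMPLE_ATTRIBUTE() generates auto_accept_delay_fops from the
 * get/set pair plus a printf format, so no hand-written read/write
 * handlers are needed. Assuming the entry is registered as
 * "auto_accept_delay" under the usual debugfs path, it could be tuned
 * with something like:
 *
 *	# echo 5000 > /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 */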
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
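/* Illustrative sketch (not part of the original file): a driver setup
 * routine could use __hci_cmd_sync() to issue a command and consume the
 * Command Complete parameters synchronously. The my_setup() name and the
 * exact error handling are assumptions for the example.
 *
 *	static int my_setup(struct hci_dev *hdev)
 *	{
 *		struct hci_rp_read_local_version *rp;
 *		struct sk_buff *skb;
 *
 *		skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *				     HCI_INIT_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		if (skb->len < sizeof(*rp)) {
 *			kfree_skb(skb);
 *			return -EIO;
 *		}
 *
 *		rp = (struct hci_rp_read_local_version *)skb->data;
 *		BT_INFO("%s: HCI version 0x%2.2x", hdev->name, rp->hci_ver);
 *
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */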
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
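/* Illustrative sketch (not part of the original file): callers hand
 * hci_req_sync() a builder that queues commands on the request and pass
 * its argument through opt. The scan_enable_req() builder below is an
 * assumption for the example; it mirrors how the *_req() helpers in this
 * file, such as hci_reset_req() right after this comment, are written.
 *
 *	static void scan_enable_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, scan_enable_req, SCAN_PAGE,
 *			   HCI_INIT_TIMEOUT);
 */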
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Block size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
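/* Worked example for the mask layout above (illustrative, not from the
 * original file): the event mask is a little-endian 64-bit bitfield, so
 * events[4] |= 0x01 sets bit 4 * 8 + 0 = 32, which is the Flow
 * Specification Complete event in the Core specification numbering.
 */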
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001571static void hci_set_event_mask_page_2(struct hci_request *req)
1572{
1573 struct hci_dev *hdev = req->hdev;
1574 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1575
1576 /* If Connectionless Slave Broadcast master role is supported
1577 * enable all necessary events for it.
1578 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001579 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001580 events[1] |= 0x40; /* Triggered Clock Capture */
1581 events[1] |= 0x80; /* Synchronization Train Complete */
1582 events[2] |= 0x10; /* Slave Page Response Timeout */
1583 events[2] |= 0x20; /* CSB Channel Map Change */
1584 }
1585
 1586	/* If Connectionless Slave Broadcast slave role is supported,
1587 * enable all necessary events for it.
1588 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001589 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001590 events[2] |= 0x01; /* Synchronization Train Received */
1591 events[2] |= 0x02; /* CSB Receive */
1592 events[2] |= 0x04; /* CSB Timeout */
1593 events[2] |= 0x08; /* Truncated Page Complete */
1594 }
1595
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001596 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001597 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001598 events[2] |= 0x80;
1599
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1601}
1602
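/* Third stage of controller init: Delete Stored Link Key (when
 * supported), default link policy, the LE event mask and host
 * support, and the remaining local extended feature pages.
 */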
Johan Hedberg42c6b122013-03-05 20:37:49 +02001603static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001604{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001605 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001606 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001608 /* Some Broadcom based Bluetooth controllers do not support the
1609 * Delete Stored Link Key command. They are clearly indicating its
1610 * absence in the bit mask of supported commands.
1611 *
 1612	 * Check the supported commands and only if the command is marked
 1613	 * as supported send it. If not supported, assume that the controller
 1614	 * does not have actual support for stored link keys, which makes this
1615 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001616 *
 1617	 * Some controllers indicate that they support deleting
1618 * stored link keys, but they don't. The quirk lets a driver
1619 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001620 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001621 if (hdev->commands[6] & 0x80 &&
1622 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001623 struct hci_cp_delete_stored_link_key cp;
1624
1625 bacpy(&cp.bdaddr, BDADDR_ANY);
1626 cp.delete_all = 0x01;
1627 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1628 sizeof(cp), &cp);
1629 }
1630
Johan Hedberg2177bab2013-03-05 20:37:43 +02001631 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001632 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633
Andre Guedes9193c6e2014-07-01 18:10:09 -03001634 if (lmp_le_capable(hdev)) {
1635 u8 events[8];
1636
1637 memset(events, 0, sizeof(events));
1638 events[0] = 0x1f;
Andre Guedes662bc2e2014-07-01 18:10:10 -03001639
1640 /* If controller supports the Connection Parameters Request
1641 * Link Layer Procedure, enable the corresponding event.
1642 */
1643 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644 events[0] |= 0x20; /* LE Remote Connection
1645 * Parameter Request
1646 */
1647
Andre Guedes9193c6e2014-07-01 18:10:09 -03001648 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1649 events);
1650
Johan Hedberg42c6b122013-03-05 20:37:49 +02001651 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001652 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001653
1654 /* Read features beyond page 1 if available */
1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656 struct hci_cp_read_local_ext_features cp;
1657
1658 cp.page = p;
1659 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1660 sizeof(cp), &cp);
1661 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001662}
1663
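/* Fourth and final init stage: event mask page 2, Synchronization
 * Train parameters and Secure Connections support, each guarded by
 * the corresponding capability check.
 */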
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001664static void hci_init4_req(struct hci_request *req, unsigned long opt)
1665{
1666 struct hci_dev *hdev = req->hdev;
1667
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001668 /* Set event mask page 2 if the HCI command for it is supported */
1669 if (hdev->commands[22] & 0x04)
1670 hci_set_event_mask_page_2(req);
1671
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001672 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001673 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001674 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001675
1676 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001677 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001678 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1680 u8 support = 0x01;
1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682 sizeof(support), &support);
1683 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001684}
1685
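/* Run the staged init sequence. AMP controllers only go through the
 * first stage; BR/EDR/LE controllers continue with stages two to four
 * and, during the initial setup phase, get their debugfs entries
 * created.
 */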
Johan Hedberg2177bab2013-03-05 20:37:43 +02001686static int __hci_init(struct hci_dev *hdev)
1687{
1688 int err;
1689
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691 if (err < 0)
1692 return err;
1693
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1696 */
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699 &dut_mode_fops);
1700 }
1701
Johan Hedberg2177bab2013-03-05 20:37:43 +02001702	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
1703 * BR/EDR/LE type controllers. AMP controllers only need the
1704 * first stage init.
1705 */
1706 if (hdev->dev_type != HCI_BREDR)
1707 return 0;
1708
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710 if (err < 0)
1711 return err;
1712
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714 if (err < 0)
1715 return err;
1716
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718 if (err < 0)
1719 return err;
1720
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1723 */
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725 return 0;
1726
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734 &blacklist_fops);
Johan Hedberg66593582014-07-09 12:59:14 +03001735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736 &whitelist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1738
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740 &conn_info_min_age_fops);
1741 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742 &conn_info_max_age_fops);
1743
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001744 if (lmp_bredr_capable(hdev)) {
1745 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001747 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001749 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001751 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001753 }
1754
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001755 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001762 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001763
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001764 if (lmp_sniff_capable(hdev)) {
1765 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766 hdev, &idle_timeout_fops);
1767 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768 hdev, &sniff_min_interval_fops);
1769 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770 hdev, &sniff_max_interval_fops);
1771 }
1772
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001773 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001774 debugfs_create_file("identity", 0400, hdev->debugfs,
1775 hdev, &identity_fops);
1776 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001778 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001780 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781 hdev, &static_address_fops);
1782
1783 /* For controllers with a public address, provide a debug
1784 * option to force the usage of the configured static
1785 * address. By default the public address is used.
1786 */
1787 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788 debugfs_create_file("force_static_address", 0644,
1789 hdev->debugfs, hdev,
1790 &force_static_address_fops);
1791
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001792 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001794 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1795 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001796 debugfs_create_file("identity_resolving_keys", 0400,
1797 hdev->debugfs, hdev,
1798 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001799 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001801 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802 hdev, &conn_min_interval_fops);
1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1812 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1814 hdev->debugfs,
1815 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001816 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001817
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001818 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001819}
1820
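/* Minimal init used for unconfigured controllers: an optional reset,
 * reading the local version and, if the driver provides a set_bdaddr
 * callback, reading the public Bluetooth device address.
 */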
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001821static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822{
1823 struct hci_dev *hdev = req->hdev;
1824
1825 BT_DBG("%s %ld", hdev->name, opt);
1826
1827 /* Reset */
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1830
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837}
1838
1839static int __hci_unconf_init(struct hci_dev *hdev)
1840{
1841 int err;
1842
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844 return 0;
1845
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847 if (err < 0)
1848 return err;
1849
1850 return 0;
1851}
1852
Johan Hedberg42c6b122013-03-05 20:37:49 +02001853static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854{
1855 __u8 scan = opt;
1856
Johan Hedberg42c6b122013-03-05 20:37:49 +02001857 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
1859 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001860 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861}
1862
Johan Hedberg42c6b122013-03-05 20:37:49 +02001863static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864{
1865 __u8 auth = opt;
1866
Johan Hedberg42c6b122013-03-05 20:37:49 +02001867 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868
1869 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001870 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871}
1872
Johan Hedberg42c6b122013-03-05 20:37:49 +02001873static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874{
1875 __u8 encrypt = opt;
1876
Johan Hedberg42c6b122013-03-05 20:37:49 +02001877 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001879 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001880 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881}
1882
Johan Hedberg42c6b122013-03-05 20:37:49 +02001883static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001884{
1885 __le16 policy = cpu_to_le16(opt);
1886
Johan Hedberg42c6b122013-03-05 20:37:49 +02001887 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001888
1889 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001890 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001891}
1892
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001893/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 * Device is held on return. */
1895struct hci_dev *hci_dev_get(int index)
1896{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001897 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 BT_DBG("%d", index);
1900
1901 if (index < 0)
1902 return NULL;
1903
1904 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001905 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 if (d->id == index) {
1907 hdev = hci_dev_hold(d);
1908 break;
1909 }
1910 }
1911 read_unlock(&hci_dev_list_lock);
1912 return hdev;
1913}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001916
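/* Return true while a discovery procedure is in progress, i.e. while
 * devices are being found or their names resolved.
 */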
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001917bool hci_discovery_active(struct hci_dev *hdev)
1918{
1919 struct discovery_state *discov = &hdev->discovery;
1920
Andre Guedes6fbe1952012-02-03 17:47:58 -03001921 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001922 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001923 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001924 return true;
1925
Andre Guedes6fbe1952012-02-03 17:47:58 -03001926 default:
1927 return false;
1928 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001929}
1930
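/* Move the discovery state machine to a new state, update the
 * background scan when discovery stops and emit the corresponding
 * mgmt Discovering events.
 */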
Johan Hedbergff9ef572012-01-04 14:23:45 +02001931void hci_discovery_set_state(struct hci_dev *hdev, int state)
1932{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001933 int old_state = hdev->discovery.state;
1934
Johan Hedbergff9ef572012-01-04 14:23:45 +02001935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1936
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001937 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001938 return;
1939
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001940 hdev->discovery.state = state;
1941
Johan Hedbergff9ef572012-01-04 14:23:45 +02001942 switch (state) {
1943 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001944 hci_update_background_scan(hdev);
1945
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001946 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001947 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001948 break;
1949 case DISCOVERY_STARTING:
1950 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001951 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001952 mgmt_discovering(hdev, 1);
1953 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001954 case DISCOVERY_RESOLVING:
1955 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001956 case DISCOVERY_STOPPING:
1957 break;
1958 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001959}
1960
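/* Drop all entries from the inquiry cache and reinitialize the
 * unknown and resolve lists.
 */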
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001961void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962{
Johan Hedberg30883512012-01-04 14:16:21 +02001963 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001964 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
Johan Hedberg561aafb2012-01-04 13:31:59 +02001966 list_for_each_entry_safe(p, n, &cache->all, all) {
1967 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001968 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001970
1971 INIT_LIST_HEAD(&cache->unknown);
1972 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973}
1974
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001975struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1976 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977{
Johan Hedberg30883512012-01-04 14:16:21 +02001978 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 struct inquiry_entry *e;
1980
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001981 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982
Johan Hedberg561aafb2012-01-04 13:31:59 +02001983 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001985 return e;
1986 }
1987
1988 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989}
1990
Johan Hedberg561aafb2012-01-04 13:31:59 +02001991struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001992 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001993{
Johan Hedberg30883512012-01-04 14:16:21 +02001994 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001995 struct inquiry_entry *e;
1996
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001997 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001998
1999 list_for_each_entry(e, &cache->unknown, list) {
2000 if (!bacmp(&e->data.bdaddr, bdaddr))
2001 return e;
2002 }
2003
2004 return NULL;
2005}
2006
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002007struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002008 bdaddr_t *bdaddr,
2009 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002010{
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2013
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002014 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002015
2016 list_for_each_entry(e, &cache->resolve, list) {
2017 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2018 return e;
2019 if (!bacmp(&e->data.bdaddr, bdaddr))
2020 return e;
2021 }
2022
2023 return NULL;
2024}
2025
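/* Re-insert the entry into the resolve list, which is kept ordered by
 * signal strength (smallest abs(rssi) first) so that the names of the
 * strongest devices get resolved first. Entries with a pending name
 * request are not reordered.
 */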
Johan Hedberga3d4e202012-01-09 00:53:02 +02002026void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002027 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002028{
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct list_head *pos = &cache->resolve;
2031 struct inquiry_entry *p;
2032
2033 list_del(&ie->list);
2034
2035 list_for_each_entry(p, &cache->resolve, list) {
2036 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002037 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002038 break;
2039 pos = &p->list;
2040 }
2041
2042 list_add(&ie->list, pos);
2043}
2044
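/* Add or refresh the inquiry cache entry for a discovered device and
 * return MGMT_DEV_FOUND_* flags for it, e.g. whether the remote name
 * still needs to be confirmed or legacy pairing is in use.
 */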
Marcel Holtmannaf589252014-07-01 14:11:20 +02002045u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2046 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
Johan Hedberg30883512012-01-04 14:16:21 +02002048 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002049 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002050 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
Szymon Janc2b2fec42012-11-20 11:38:54 +01002054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2055
Marcel Holtmannaf589252014-07-01 14:11:20 +02002056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002058
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002060 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002061 if (!ie->data.ssp_mode)
2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002063
Johan Hedberga3d4e202012-01-09 00:53:02 +02002064 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002065 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002066 ie->data.rssi = data->rssi;
2067 hci_inquiry_cache_update_resolve(hdev, ie);
2068 }
2069
Johan Hedberg561aafb2012-01-04 13:31:59 +02002070 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002071 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002072
Johan Hedberg561aafb2012-01-04 13:31:59 +02002073 /* Entry not in the cache. Add new one. */
2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002075 if (!ie) {
2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077 goto done;
2078 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002079
2080 list_add(&ie->all, &cache->all);
2081
2082 if (name_known) {
2083 ie->name_state = NAME_KNOWN;
2084 } else {
2085 ie->name_state = NAME_NOT_KNOWN;
2086 list_add(&ie->list, &cache->unknown);
2087 }
2088
2089update:
2090 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002091 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002092 ie->name_state = NAME_KNOWN;
2093 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 }
2095
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002096 memcpy(&ie->data, data, sizeof(*data));
2097 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002099
2100 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002102
Marcel Holtmannaf589252014-07-01 14:11:20 +02002103done:
2104 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105}
2106
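/* Copy up to num entries from the inquiry cache into buf as
 * struct inquiry_info records and return the number copied.
 */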
2107static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2108{
Johan Hedberg30883512012-01-04 14:16:21 +02002109 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 struct inquiry_info *info = (struct inquiry_info *) buf;
2111 struct inquiry_entry *e;
2112 int copied = 0;
2113
Johan Hedberg561aafb2012-01-04 13:31:59 +02002114 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002116
2117 if (copied >= num)
2118 break;
2119
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 bacpy(&info->bdaddr, &data->bdaddr);
2121 info->pscan_rep_mode = data->pscan_rep_mode;
2122 info->pscan_period_mode = data->pscan_period_mode;
2123 info->pscan_mode = data->pscan_mode;
2124 memcpy(info->dev_class, data->dev_class, 3);
2125 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002126
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002128 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 }
2130
2131 BT_DBG("cache %p, copied %d", cache, copied);
2132 return copied;
2133}
2134
Johan Hedberg42c6b122013-03-05 20:37:49 +02002135static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136{
2137 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002138 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 struct hci_cp_inquiry cp;
2140
2141 BT_DBG("%s", hdev->name);
2142
2143 if (test_bit(HCI_INQUIRY, &hdev->flags))
2144 return;
2145
2146 /* Start Inquiry */
2147 memcpy(&cp.lap, &ir->lap, 3);
2148 cp.length = ir->length;
2149 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002150 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151}
2152
Andre Guedes3e13fa12013-03-27 20:04:56 -03002153static int wait_inquiry(void *word)
2154{
2155 schedule();
2156 return signal_pending(current);
2157}
2158
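/* HCIINQUIRY ioctl handler: start an inquiry if the cache is stale or
 * a flush was requested, wait for it to finish and copy the cached
 * results back to user space.
 */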
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159int hci_inquiry(void __user *arg)
2160{
2161 __u8 __user *ptr = arg;
2162 struct hci_inquiry_req ir;
2163 struct hci_dev *hdev;
2164 int err = 0, do_inquiry = 0, max_rsp;
2165 long timeo;
2166 __u8 *buf;
2167
2168 if (copy_from_user(&ir, ptr, sizeof(ir)))
2169 return -EFAULT;
2170
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002171 hdev = hci_dev_get(ir.dev_id);
2172 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 return -ENODEV;
2174
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002175 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2176 err = -EBUSY;
2177 goto done;
2178 }
2179
Marcel Holtmann4a964402014-07-02 19:10:33 +02002180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002181 err = -EOPNOTSUPP;
2182 goto done;
2183 }
2184
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002185 if (hdev->dev_type != HCI_BREDR) {
2186 err = -EOPNOTSUPP;
2187 goto done;
2188 }
2189
Johan Hedberg56f87902013-10-02 13:43:13 +03002190 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2191 err = -EOPNOTSUPP;
2192 goto done;
2193 }
2194
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002195 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002198 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 do_inquiry = 1;
2200 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002201 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
Marcel Holtmann04837f62006-07-03 10:02:33 +02002203 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002204
2205 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2207 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002208 if (err < 0)
2209 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002210
 2211		/* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag is
2212 * cleared). If it is interrupted by a signal, return -EINTR.
2213 */
2214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215 TASK_INTERRUPTIBLE))
2216 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002219	/* For an unlimited number of responses we will use a buffer with
 2220	 * 255 entries.
2221 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2223
 2224	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
 2225	 * copy it to user space.
2226 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002228 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 err = -ENOMEM;
2230 goto done;
2231 }
2232
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002233 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002235 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
2237 BT_DBG("num_rsp %d", ir.num_rsp);
2238
2239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2240 ptr += sizeof(ir);
2241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002242 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002244 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 err = -EFAULT;
2246
2247 kfree(buf);
2248
2249done:
2250 hci_dev_put(hdev);
2251 return err;
2252}
2253
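/* Core power-on path: perform the rfkill and address sanity checks,
 * call the driver open() and setup() callbacks and run the HCI init
 * sequence. On failure everything is torn down again.
 */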
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002254static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 int ret = 0;
2257
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 BT_DBG("%s %p", hdev->name, hdev);
2259
2260 hci_req_lock(hdev);
2261
Johan Hovold94324962012-03-15 14:48:41 +01002262 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2263 ret = -ENODEV;
2264 goto done;
2265 }
2266
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002269 /* Check for rfkill but allow the HCI setup stage to
2270 * proceed (which in itself doesn't cause any RF activity).
2271 */
2272 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2273 ret = -ERFKILL;
2274 goto done;
2275 }
2276
 2277		/* Check for a valid public address or a configured static
 2278		 * random address, but let the HCI setup proceed to
2279 * be able to determine if there is a public address
2280 * or not.
2281 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002282 * In case of user channel usage, it is not important
2283 * if a public address or static random address is
2284 * available.
2285 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002286 * This check is only valid for BR/EDR controllers
2287 * since AMP controllers do not have an address.
2288 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002289 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002291 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293 ret = -EADDRNOTAVAIL;
2294 goto done;
2295 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002296 }
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 if (test_bit(HCI_UP, &hdev->flags)) {
2299 ret = -EALREADY;
2300 goto done;
2301 }
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 if (hdev->open(hdev)) {
2304 ret = -EIO;
2305 goto done;
2306 }
2307
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002308 atomic_set(&hdev->cmd_cnt, 1);
2309 set_bit(HCI_INIT, &hdev->flags);
2310
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2312 if (hdev->setup)
2313 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002314
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002315 /* The transport driver can set these quirks before
2316 * creating the HCI device or in its setup callback.
2317 *
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2320 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002324
2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2328 *
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2332 */
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002335 }
2336
Marcel Holtmann9713c172014-07-06 12:11:15 +02002337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2341 * on procedure.
2342 */
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346 else
2347 ret = -EADDRNOTAVAIL;
2348 }
2349
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002350 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002353 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 }
2355
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002356 clear_bit(HCI_INIT, &hdev->flags);
2357
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 if (!ret) {
2359 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002360 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 set_bit(HCI_UP, &hdev->flags);
2362 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002367 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002368 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002369 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002370 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002371 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002372 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002374 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002375 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002376 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377
2378 skb_queue_purge(&hdev->cmd_q);
2379 skb_queue_purge(&hdev->rx_q);
2380
2381 if (hdev->flush)
2382 hdev->flush(hdev);
2383
2384 if (hdev->sent_cmd) {
2385 kfree_skb(hdev->sent_cmd);
2386 hdev->sent_cmd = NULL;
2387 }
2388
2389 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002390 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 }
2392
2393done:
2394 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 return ret;
2396}
2397
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002398/* ---- HCI ioctl helpers ---- */
2399
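/* Power on a controller on behalf of an ioctl request. Unconfigured
 * controllers are rejected here since they can only be brought up as
 * user channel.
 */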
2400int hci_dev_open(__u16 dev)
2401{
2402 struct hci_dev *hdev;
2403 int err;
2404
2405 hdev = hci_dev_get(dev);
2406 if (!hdev)
2407 return -ENODEV;
2408
Marcel Holtmann4a964402014-07-02 19:10:33 +02002409 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002410 * up as user channel. Trying to bring them up as normal devices
 2411	 * will result in a failure. Only user channel operation is
2412 * possible.
2413 *
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2416 * open the device.
2417 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420 err = -EOPNOTSUPP;
2421 goto done;
2422 }
2423
Johan Hedberge1d08f42013-10-01 22:44:50 +03002424 /* We need to ensure that no other power on/off work is pending
2425 * before proceeding to call hci_dev_do_open. This is
2426 * particularly important if the setup procedure has not yet
2427 * completed.
2428 */
2429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430 cancel_delayed_work(&hdev->power_off);
2431
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002432 /* After this call it is guaranteed that the setup procedure
2433 * has finished. This means that error conditions like RFKILL
2434 * or no valid public or static random address apply.
2435 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002436 flush_workqueue(hdev->req_workqueue);
2437
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002438 err = hci_dev_do_open(hdev);
2439
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002440done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002441 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002442 return err;
2443}
2444
Johan Hedbergd7347f32014-07-04 12:37:23 +03002445/* This function requires the caller holds hdev->lock */
2446static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447{
2448 struct hci_conn_params *p;
2449
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2452
2453 BT_DBG("All LE pending actions cleared");
2454}
2455
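/* Core power-off path: cancel pending work, flush queues and caches,
 * optionally reset the controller and call the driver close()
 * callback.
 */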
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456static int hci_dev_do_close(struct hci_dev *hdev)
2457{
2458 BT_DBG("%s %p", hdev->name, hdev);
2459
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002460 cancel_delayed_work(&hdev->power_off);
2461
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 hci_req_cancel(hdev, ENODEV);
2463 hci_req_lock(hdev);
2464
2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002466 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 hci_req_unlock(hdev);
2468 return 0;
2469 }
2470
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002471 /* Flush RX and TX works */
2472 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002473 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002475 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002476 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002477 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002479 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002480 }
2481
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002482 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002483 cancel_delayed_work(&hdev->service_cache);
2484
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002485 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002486
2487 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002489
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002490 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002491 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 hci_conn_hash_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002493 hci_pend_le_actions_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002494 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
2496 hci_notify(hdev, HCI_DEV_DOWN);
2497
2498 if (hdev->flush)
2499 hdev->flush(hdev);
2500
2501 /* Reset device */
2502 skb_queue_purge(&hdev->cmd_q);
2503 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 clear_bit(HCI_INIT, &hdev->flags);
2510 }
2511
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002512 /* flush cmd work */
2513 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
2515 /* Drop queues */
2516 skb_queue_purge(&hdev->rx_q);
2517 skb_queue_purge(&hdev->cmd_q);
2518 skb_queue_purge(&hdev->raw_q);
2519
2520 /* Drop last sent command */
2521 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002522 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 kfree_skb(hdev->sent_cmd);
2524 hdev->sent_cmd = NULL;
2525 }
2526
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002527 kfree_skb(hdev->recv_evt);
2528 hdev->recv_evt = NULL;
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 /* After this point our queues are empty
2531 * and no tasks are scheduled. */
2532 hdev->close(hdev);
2533
Johan Hedberg35b973c2013-03-15 17:06:59 -05002534 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002535 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2537
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539 if (hdev->dev_type == HCI_BREDR) {
2540 hci_dev_lock(hdev);
2541 mgmt_powered(hdev, 0);
2542 hci_dev_unlock(hdev);
2543 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002544 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002545
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002546 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002547 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002548
Johan Hedberge59fda82012-02-22 18:11:53 +02002549 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002550 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002551 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002552
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 hci_req_unlock(hdev);
2554
2555 hci_dev_put(hdev);
2556 return 0;
2557}
2558
2559int hci_dev_close(__u16 dev)
2560{
2561 struct hci_dev *hdev;
2562 int err;
2563
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002564 hdev = hci_dev_get(dev);
2565 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002567
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569 err = -EBUSY;
2570 goto done;
2571 }
2572
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574 cancel_delayed_work(&hdev->power_off);
2575
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002577
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002578done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 hci_dev_put(hdev);
2580 return err;
2581}
2582
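/* HCIDEVRESET ioctl: flush the queues and caches of a running
 * controller and issue an HCI Reset without taking the device fully
 * down.
 */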
2583int hci_dev_reset(__u16 dev)
2584{
2585 struct hci_dev *hdev;
2586 int ret = 0;
2587
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002588 hdev = hci_dev_get(dev);
2589 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 return -ENODEV;
2591
2592 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593
Marcel Holtmann808a0492013-08-26 20:57:58 -07002594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002597 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600 ret = -EBUSY;
2601 goto done;
2602 }
2603
Marcel Holtmann4a964402014-07-02 19:10:33 +02002604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002605 ret = -EOPNOTSUPP;
2606 goto done;
2607 }
2608
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 /* Drop queues */
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2612
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002613 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002614 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002616 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
2618 if (hdev->flush)
2619 hdev->flush(hdev);
2620
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002621 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625
2626done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 hci_req_unlock(hdev);
2628 hci_dev_put(hdev);
2629 return ret;
2630}
2631
2632int hci_dev_reset_stat(__u16 dev)
2633{
2634 struct hci_dev *hdev;
2635 int ret = 0;
2636
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002637 hdev = hci_dev_get(dev);
2638 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 return -ENODEV;
2640
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002641 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2642 ret = -EBUSY;
2643 goto done;
2644 }
2645
Marcel Holtmann4a964402014-07-02 19:10:33 +02002646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002647 ret = -EOPNOTSUPP;
2648 goto done;
2649 }
2650
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2652
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002653done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 return ret;
2656}
2657
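/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode change that happened outside of mgmt and notify mgmt about
 * the new settings.
 */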
Johan Hedberg123abc02014-07-10 12:09:07 +03002658static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2659{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002660 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002661
2662 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2663
2664 if ((scan & SCAN_PAGE))
2665 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2666 &hdev->dev_flags);
2667 else
2668 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2669 &hdev->dev_flags);
2670
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002671 if ((scan & SCAN_INQUIRY)) {
2672 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2673 &hdev->dev_flags);
2674 } else {
2675 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2676 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2677 &hdev->dev_flags);
2678 }
2679
Johan Hedberg123abc02014-07-10 12:09:07 +03002680 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2681 return;
2682
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002683 if (conn_changed || discov_changed) {
2684 /* In case this was disabled through mgmt */
2685 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2686
2687 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2688 mgmt_update_adv_data(hdev);
2689
Johan Hedberg123abc02014-07-10 12:09:07 +03002690 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002691 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002692}
2693
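/* Handler for the remaining HCISET* ioctls that tweak authentication,
 * encryption, scan mode, link policy and packet type/MTU settings of
 * a BR/EDR controller.
 */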
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694int hci_dev_cmd(unsigned int cmd, void __user *arg)
2695{
2696 struct hci_dev *hdev;
2697 struct hci_dev_req dr;
2698 int err = 0;
2699
2700 if (copy_from_user(&dr, arg, sizeof(dr)))
2701 return -EFAULT;
2702
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002703 hdev = hci_dev_get(dr.dev_id);
2704 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 return -ENODEV;
2706
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002707 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2708 err = -EBUSY;
2709 goto done;
2710 }
2711
Marcel Holtmann4a964402014-07-02 19:10:33 +02002712 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002713 err = -EOPNOTSUPP;
2714 goto done;
2715 }
2716
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002717 if (hdev->dev_type != HCI_BREDR) {
2718 err = -EOPNOTSUPP;
2719 goto done;
2720 }
2721
Johan Hedberg56f87902013-10-02 13:43:13 +03002722 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2723 err = -EOPNOTSUPP;
2724 goto done;
2725 }
2726
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 switch (cmd) {
2728 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002729 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2730 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 break;
2732
2733 case HCISETENCRYPT:
2734 if (!lmp_encrypt_capable(hdev)) {
2735 err = -EOPNOTSUPP;
2736 break;
2737 }
2738
2739 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2740 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002741 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2742 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 if (err)
2744 break;
2745 }
2746
Johan Hedberg01178cd2013-03-05 20:37:41 +02002747 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2748 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 break;
2750
2751 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002752 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2753 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002754
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002755 /* Ensure that the connectable and discoverable states
2756 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002757 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002758 if (!err)
2759 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 break;
2761
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002762 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002763 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2764 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002765 break;
2766
2767 case HCISETLINKMODE:
2768 hdev->link_mode = ((__u16) dr.dev_opt) &
2769 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2770 break;
2771
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 case HCISETPTYPE:
2773 hdev->pkt_type = (__u16) dr.dev_opt;
2774 break;
2775
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002777 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2778 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 break;
2780
2781 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002782 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2783 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 break;
2785
2786 default:
2787 err = -EINVAL;
2788 break;
2789 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002790
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002791done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 hci_dev_put(hdev);
2793 return err;
2794}
2795
2796int hci_get_dev_list(void __user *arg)
2797{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002798 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 struct hci_dev_list_req *dl;
2800 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 int n = 0, size, err;
2802 __u16 dev_num;
2803
2804 if (get_user(dev_num, (__u16 __user *) arg))
2805 return -EFAULT;
2806
2807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2808 return -EINVAL;
2809
2810 size = sizeof(*dl) + dev_num * sizeof(*dr);
2811
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002812 dl = kzalloc(size, GFP_KERNEL);
2813 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 return -ENOMEM;
2815
2816 dr = dl->dev_req;
2817
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002818 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002819 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002820 unsigned long flags = hdev->flags;
2821
 2822		/* When the auto-off is configured, it means the transport
2823 * is running, but in that case still indicate that the
2824 * device is actually down.
2825 */
2826 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2827 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002828
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002829 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2830 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002831
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002833 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002834
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 if (++n >= dev_num)
2836 break;
2837 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002838 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839
2840 dl->dev_num = n;
2841 size = sizeof(*dl) + n * sizeof(*dr);
2842
2843 err = copy_to_user(arg, dl, size);
2844 kfree(dl);
2845
2846 return err ? -EFAULT : 0;
2847}
2848
2849int hci_get_dev_info(void __user *arg)
2850{
2851 struct hci_dev *hdev;
2852 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002853 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 int err = 0;
2855
2856 if (copy_from_user(&di, arg, sizeof(di)))
2857 return -EFAULT;
2858
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002859 hdev = hci_dev_get(di.dev_id);
2860 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 return -ENODEV;
2862
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002863	/* When auto-off is configured, the transport is running,
2864	 * but in that case still indicate that the
2865 * device is actually down.
2866 */
2867 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2868 flags = hdev->flags & ~BIT(HCI_UP);
2869 else
2870 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002871
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002872 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2873 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002874
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 strcpy(di.name, hdev->name);
2876 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002877 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002878 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002880 if (lmp_bredr_capable(hdev)) {
2881 di.acl_mtu = hdev->acl_mtu;
2882 di.acl_pkts = hdev->acl_pkts;
2883 di.sco_mtu = hdev->sco_mtu;
2884 di.sco_pkts = hdev->sco_pkts;
2885 } else {
2886 di.acl_mtu = hdev->le_mtu;
2887 di.acl_pkts = hdev->le_pkts;
2888 di.sco_mtu = 0;
2889 di.sco_pkts = 0;
2890 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 di.link_policy = hdev->link_policy;
2892 di.link_mode = hdev->link_mode;
2893
2894 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2895 memcpy(&di.features, &hdev->features, sizeof(di.features));
2896
2897 if (copy_to_user(arg, &di, sizeof(di)))
2898 err = -EFAULT;
2899
2900 hci_dev_put(hdev);
2901
2902 return err;
2903}
2904
2905/* ---- Interface to HCI drivers ---- */
2906
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002907static int hci_rfkill_set_block(void *data, bool blocked)
2908{
2909 struct hci_dev *hdev = data;
2910
2911 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2912
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002913 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2914 return -EBUSY;
2915
Johan Hedberg5e130362013-09-13 08:58:17 +03002916 if (blocked) {
2917 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002918 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2919 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002920 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002921 } else {
2922 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002923 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002924
2925 return 0;
2926}
2927
2928static const struct rfkill_ops hci_rfkill_ops = {
2929 .set_block = hci_rfkill_set_block,
2930};
2931
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002932static void hci_power_on(struct work_struct *work)
2933{
2934 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002935 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002936
2937 BT_DBG("%s", hdev->name);
2938
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002939 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002940 if (err < 0) {
2941 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002942 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002943 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002944
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002945 /* During the HCI setup phase, a few error conditions are
2946 * ignored and they need to be checked now. If they are still
2947 * valid, it is important to turn the device back off.
2948 */
2949 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002950 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002951 (hdev->dev_type == HCI_BREDR &&
2952 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2953 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002954 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2955 hci_dev_do_close(hdev);
2956 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002957 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2958 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002959 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002960
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002961 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002962 /* For unconfigured devices, set the HCI_RAW flag
2963 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002964 */
2965 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2966 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002967
2968 /* For fully configured devices, this will send
2969 * the Index Added event. For unconfigured devices,
2970	 * it will send the Unconfigured Index Added event.
2971 *
2972 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2973	 * and no event will be sent.
2974 */
2975 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002976 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002977		/* Now that the controller is configured, it is
2978		 * important to clear the HCI_RAW flag.
2979 */
2980 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2981 clear_bit(HCI_RAW, &hdev->flags);
2982
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002983 /* Powering on the controller with HCI_CONFIG set only
2984 * happens with the transition from unconfigured to
2985 * configured. This will send the Index Added event.
2986 */
2987 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002988 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002989}
2990
2991static void hci_power_off(struct work_struct *work)
2992{
Johan Hedberg32435532011-11-07 22:16:04 +02002993 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002994 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002995
2996 BT_DBG("%s", hdev->name);
2997
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002998 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002999}
3000
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003001static void hci_discov_off(struct work_struct *work)
3002{
3003 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003004
3005 hdev = container_of(work, struct hci_dev, discov_off.work);
3006
3007 BT_DBG("%s", hdev->name);
3008
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003009 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003010}
3011
Johan Hedberg35f74982014-02-18 17:14:32 +02003012void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003013{
Johan Hedberg48210022013-01-27 00:31:28 +02003014 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003015
Johan Hedberg48210022013-01-27 00:31:28 +02003016 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3017 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003018 kfree(uuid);
3019 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003020}
3021
Johan Hedberg35f74982014-02-18 17:14:32 +02003022void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003023{
3024 struct list_head *p, *n;
3025
3026 list_for_each_safe(p, n, &hdev->link_keys) {
3027 struct link_key *key;
3028
3029 key = list_entry(p, struct link_key, list);
3030
3031 list_del(p);
3032 kfree(key);
3033 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003034}
3035
Johan Hedberg35f74982014-02-18 17:14:32 +02003036void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003037{
3038 struct smp_ltk *k, *tmp;
3039
3040 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3041 list_del(&k->list);
3042 kfree(k);
3043 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003044}
3045
Johan Hedberg970c4e42014-02-18 10:19:33 +02003046void hci_smp_irks_clear(struct hci_dev *hdev)
3047{
3048 struct smp_irk *k, *tmp;
3049
3050 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3051 list_del(&k->list);
3052 kfree(k);
3053 }
3054}
3055
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003056struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3057{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003058 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003059
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003060 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003061 if (bacmp(bdaddr, &k->bdaddr) == 0)
3062 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003063
3064 return NULL;
3065}
3066
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303067static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003068 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003069{
3070 /* Legacy key */
3071 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303072 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003073
3074 /* Debug keys are insecure so don't store them persistently */
3075 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303076 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003077
3078 /* Changed combination key and there's no previous one */
3079 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303080 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003081
3082 /* Security mode 3 case */
3083 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303084 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003085
3086	/* Neither local nor remote side had no-bonding as a requirement */
3087 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303088 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003089
3090 /* Local side had dedicated bonding as requirement */
3091 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303092 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003093
3094 /* Remote side had dedicated bonding as requirement */
3095 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303096 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003097
3098 /* If none of the above criteria match, then don't store the key
3099 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303100 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003101}
3102
Johan Hedberg98a0b842014-01-30 19:40:00 -08003103static bool ltk_type_master(u8 type)
3104{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03003105 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08003106}
3107
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003108struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003109 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003110{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003111 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003112
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003113 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003114 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003115 continue;
3116
Johan Hedberg98a0b842014-01-30 19:40:00 -08003117 if (ltk_type_master(k->type) != master)
3118 continue;
3119
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003120 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003121 }
3122
3123 return NULL;
3124}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003125
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003126struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003127 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003128{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003129 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003130
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003131 list_for_each_entry(k, &hdev->long_term_keys, list)
3132 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003133 bacmp(bdaddr, &k->bdaddr) == 0 &&
3134 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003135 return k;
3136
3137 return NULL;
3138}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003139
Johan Hedberg970c4e42014-02-18 10:19:33 +02003140struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3141{
3142 struct smp_irk *irk;
3143
3144 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3145 if (!bacmp(&irk->rpa, rpa))
3146 return irk;
3147 }
3148
3149 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3150 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3151 bacpy(&irk->rpa, rpa);
3152 return irk;
3153 }
3154 }
3155
3156 return NULL;
3157}
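
/* Illustrative sketch, not part of the original file: resolving an
 * incoming resolvable private address (RPA) back to the peer's
 * identity address with the helper above. The caller context is an
 * assumption made for the example.
 */
static bdaddr_t *example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_rpa(hdev, rpa);
	if (!irk)
		return rpa;	/* No matching IRK, keep the RPA as-is */

	/* A stored IRK matched, use the distributed identity address */
	return &irk->bdaddr;
}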
3158
3159struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3160 u8 addr_type)
3161{
3162 struct smp_irk *irk;
3163
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003164 /* Identity Address must be public or static random */
3165 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3166 return NULL;
3167
Johan Hedberg970c4e42014-02-18 10:19:33 +02003168 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169 if (addr_type == irk->addr_type &&
3170 bacmp(bdaddr, &irk->bdaddr) == 0)
3171 return irk;
3172 }
3173
3174 return NULL;
3175}
3176
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003177struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003178 bdaddr_t *bdaddr, u8 *val, u8 type,
3179 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003180{
3181 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303182 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003183
3184 old_key = hci_find_link_key(hdev, bdaddr);
3185 if (old_key) {
3186 old_key_type = old_key->type;
3187 key = old_key;
3188 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003189 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003190 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003191 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003192 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003193 list_add(&key->list, &hdev->link_keys);
3194 }
3195
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003196 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003197
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003198 /* Some buggy controller combinations generate a changed
3199 * combination key for legacy pairing even when there's no
3200 * previous key */
3201 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003202 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003203 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003204 if (conn)
3205 conn->key_type = type;
3206 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003207
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003208 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003209 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003210 key->pin_len = pin_len;
3211
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003212 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003213 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003214 else
3215 key->type = type;
3216
Johan Hedberg7652ff62014-06-24 13:15:49 +03003217 if (persistent)
3218 *persistent = hci_persistent_key(hdev, conn, type,
3219 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003220
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003221 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003222}
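
/* Illustrative sketch, not part of the original file: storing a link
 * key from a Link Key Notification event and reacting to the
 * persistence decision computed by hci_persistent_key(). The event
 * handler context is an assumption made for the example.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr,
				   u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key;
	bool persistent;

	hci_dev_lock(hdev);
	key = hci_add_link_key(hdev, conn, bdaddr, val, type, pin_len,
			       &persistent);
	if (key && !persistent)
		BT_DBG("%s key for %pMR is not persistent", hdev->name,
		       bdaddr);
	hci_dev_unlock(hdev);
}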
3223
Johan Hedbergca9142b2014-02-19 14:57:44 +02003224struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003225 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003226 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003227{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003228 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003229 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003230
Johan Hedberg98a0b842014-01-30 19:40:00 -08003231 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003232 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003233 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003234 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003235 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003236 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003237 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003238 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003239 }
3240
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003241 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003242 key->bdaddr_type = addr_type;
3243 memcpy(key->val, tk, sizeof(key->val));
3244 key->authenticated = authenticated;
3245 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003246 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003247 key->enc_size = enc_size;
3248 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003249
Johan Hedbergca9142b2014-02-19 14:57:44 +02003250 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003251}
3252
Johan Hedbergca9142b2014-02-19 14:57:44 +02003253struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003255{
3256 struct smp_irk *irk;
3257
3258 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3259 if (!irk) {
3260 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3261 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003262 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003263
3264 bacpy(&irk->bdaddr, bdaddr);
3265 irk->addr_type = addr_type;
3266
3267 list_add(&irk->list, &hdev->identity_resolving_keys);
3268 }
3269
3270 memcpy(irk->val, val, 16);
3271 bacpy(&irk->rpa, rpa);
3272
Johan Hedbergca9142b2014-02-19 14:57:44 +02003273 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003274}
3275
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003276int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3277{
3278 struct link_key *key;
3279
3280 key = hci_find_link_key(hdev, bdaddr);
3281 if (!key)
3282 return -ENOENT;
3283
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003284 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003285
3286 list_del(&key->list);
3287 kfree(key);
3288
3289 return 0;
3290}
3291
Johan Hedberge0b2b272014-02-18 17:14:31 +02003292int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003293{
3294 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003295 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003296
3297 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003298 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003299 continue;
3300
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003301 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003302
3303 list_del(&k->list);
3304 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003305 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003306 }
3307
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003308 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003309}
3310
Johan Hedberga7ec7332014-02-18 17:14:35 +02003311void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3312{
3313 struct smp_irk *k, *tmp;
3314
Johan Hedberg668b7b12014-02-21 16:03:31 +02003315 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003316 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3317 continue;
3318
3319 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3320
3321 list_del(&k->list);
3322 kfree(k);
3323 }
3324}
3325
Ville Tervo6bd32322011-02-16 16:32:41 +02003326/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003327static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003328{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003329 struct hci_dev *hdev = container_of(work, struct hci_dev,
3330 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003331
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003332 if (hdev->sent_cmd) {
3333 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3334 u16 opcode = __le16_to_cpu(sent->opcode);
3335
3336 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3337 } else {
3338 BT_ERR("%s command tx timeout", hdev->name);
3339 }
3340
Ville Tervo6bd32322011-02-16 16:32:41 +02003341 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003342 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003343}
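
/* Illustrative sketch, not part of the original file: how a command
 * submission path might arm the timer handled above. HCI_CMD_TIMEOUT
 * is assumed to be the timeout constant from hci.h.
 */
static void example_arm_cmd_timer(struct hci_dev *hdev)
{
	/* hci_cmd_timeout() fires unless a Command Complete or Command
	 * Status event cancels or rearms this work in time.
	 */
	schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
}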
3344
Szymon Janc2763eda2011-03-22 13:12:22 +01003345struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003346 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003347{
3348 struct oob_data *data;
3349
3350 list_for_each_entry(data, &hdev->remote_oob_data, list)
3351 if (bacmp(bdaddr, &data->bdaddr) == 0)
3352 return data;
3353
3354 return NULL;
3355}
3356
3357int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3358{
3359 struct oob_data *data;
3360
3361 data = hci_find_remote_oob_data(hdev, bdaddr);
3362 if (!data)
3363 return -ENOENT;
3364
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003365 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003366
3367 list_del(&data->list);
3368 kfree(data);
3369
3370 return 0;
3371}
3372
Johan Hedberg35f74982014-02-18 17:14:32 +02003373void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003374{
3375 struct oob_data *data, *n;
3376
3377 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3378 list_del(&data->list);
3379 kfree(data);
3380 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003381}
3382
Marcel Holtmann07988722014-01-10 02:07:29 -08003383int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3384 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003385{
3386 struct oob_data *data;
3387
3388 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003389 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003390 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003391 if (!data)
3392 return -ENOMEM;
3393
3394 bacpy(&data->bdaddr, bdaddr);
3395 list_add(&data->list, &hdev->remote_oob_data);
3396 }
3397
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003398 memcpy(data->hash192, hash, sizeof(data->hash192));
3399 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003400
Marcel Holtmann07988722014-01-10 02:07:29 -08003401 memset(data->hash256, 0, sizeof(data->hash256));
3402 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3403
3404 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3405
3406 return 0;
3407}
3408
3409int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3410 u8 *hash192, u8 *randomizer192,
3411 u8 *hash256, u8 *randomizer256)
3412{
3413 struct oob_data *data;
3414
3415 data = hci_find_remote_oob_data(hdev, bdaddr);
3416 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003417 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003418 if (!data)
3419 return -ENOMEM;
3420
3421 bacpy(&data->bdaddr, bdaddr);
3422 list_add(&data->list, &hdev->remote_oob_data);
3423 }
3424
3425 memcpy(data->hash192, hash192, sizeof(data->hash192));
3426 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3427
3428 memcpy(data->hash256, hash256, sizeof(data->hash256));
3429 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3430
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003431 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003432
3433 return 0;
3434}
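
/* Illustrative sketch, not part of the original file: storing P-192
 * only OOB data for a peer, as a mgmt command handler might do. Both
 * buffers are assumed to be 16 bytes, matching the helpers above.
 */
static int example_store_oob(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 hash[16], u8 randomizer[16])
{
	int err;

	hci_dev_lock(hdev);
	err = hci_add_remote_oob_data(hdev, bdaddr, hash, randomizer);
	hci_dev_unlock(hdev);

	return err;
}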
3435
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003436struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003437 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003438{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003439 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003440
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003441 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003442 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003443 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003444 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003445
3446 return NULL;
3447}
3448
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003449void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003450{
3451 struct list_head *p, *n;
3452
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003453 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003454 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003455
3456 list_del(p);
3457 kfree(b);
3458 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003459}
3460
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003461int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003462{
3463 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003464
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003465 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003466 return -EBADF;
3467
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003468 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003469 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003470
3471 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003472 if (!entry)
3473 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003474
3475 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003476 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003477
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003478 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003479
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003480 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003481}
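
/* Illustrative sketch, not part of the original file: typical use of
 * the bdaddr list helpers, here against hdev->whitelist. BDADDR_BREDR
 * is the standard address type constant from bluetooth.h.
 */
static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	if (err == -EEXIST)
		BT_DBG("%s %pMR already present", hdev->name, bdaddr);

	return err;
}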
3482
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003483int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003484{
3485 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003486
Johan Hedberg35f74982014-02-18 17:14:32 +02003487 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003488 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003489 return 0;
3490 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003491
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003492 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003493 if (!entry)
3494 return -ENOENT;
3495
3496 list_del(&entry->list);
3497 kfree(entry);
3498
3499 return 0;
3500}
3501
Andre Guedes15819a72014-02-03 13:56:18 -03003502/* This function requires the caller holds hdev->lock */
3503struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3504 bdaddr_t *addr, u8 addr_type)
3505{
3506 struct hci_conn_params *params;
3507
Johan Hedberg738f6182014-07-03 19:33:51 +03003508 /* The conn params list only contains identity addresses */
3509 if (!hci_is_identity_address(addr, addr_type))
3510 return NULL;
3511
Andre Guedes15819a72014-02-03 13:56:18 -03003512 list_for_each_entry(params, &hdev->le_conn_params, list) {
3513 if (bacmp(&params->addr, addr) == 0 &&
3514 params->addr_type == addr_type) {
3515 return params;
3516 }
3517 }
3518
3519 return NULL;
3520}
3521
Andre Guedescef952c2014-02-26 20:21:49 -03003522static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3523{
3524 struct hci_conn *conn;
3525
3526 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3527 if (!conn)
3528 return false;
3529
3530 if (conn->dst_type != type)
3531 return false;
3532
3533 if (conn->state != BT_CONNECTED)
3534 return false;
3535
3536 return true;
3537}
3538
Andre Guedes15819a72014-02-03 13:56:18 -03003539/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003540struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3541 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003542{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003543 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003544
Johan Hedberg738f6182014-07-03 19:33:51 +03003545 /* The list only contains identity addresses */
3546 if (!hci_is_identity_address(addr, addr_type))
3547 return NULL;
3548
Johan Hedberg501f8822014-07-04 12:37:26 +03003549 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003550 if (bacmp(&param->addr, addr) == 0 &&
3551 param->addr_type == addr_type)
3552 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003553 }
3554
3555 return NULL;
3556}
3557
3558/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003559struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3560 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003561{
3562 struct hci_conn_params *params;
3563
Johan Hedbergc46245b2014-07-02 17:37:33 +03003564 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003565 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003566
3567 params = hci_conn_params_lookup(hdev, addr, addr_type);
3568 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003569 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003570
3571 params = kzalloc(sizeof(*params), GFP_KERNEL);
3572 if (!params) {
3573 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003574 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003575 }
3576
3577 bacpy(&params->addr, addr);
3578 params->addr_type = addr_type;
3579
3580 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003581 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003582
3583 params->conn_min_interval = hdev->le_conn_min_interval;
3584 params->conn_max_interval = hdev->le_conn_max_interval;
3585 params->conn_latency = hdev->le_conn_latency;
3586 params->supervision_timeout = hdev->le_supv_timeout;
3587 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3588
3589 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3590
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003591 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003592}
3593
3594/* This function requires the caller holds hdev->lock */
3595int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003596 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003597{
3598 struct hci_conn_params *params;
3599
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003600 params = hci_conn_params_add(hdev, addr, addr_type);
3601 if (!params)
3602 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003603
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003604 if (params->auto_connect == auto_connect)
3605 return 0;
3606
Johan Hedberg95305ba2014-07-04 12:37:21 +03003607 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003608
Andre Guedescef952c2014-02-26 20:21:49 -03003609 switch (auto_connect) {
3610 case HCI_AUTO_CONN_DISABLED:
3611 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003612 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003613 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003614 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003615 list_add(&params->action, &hdev->pend_le_reports);
3616 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003617 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003618 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003619 if (!is_connected(hdev, addr, addr_type)) {
3620 list_add(&params->action, &hdev->pend_le_conns);
3621 hci_update_background_scan(hdev);
3622 }
Andre Guedescef952c2014-02-26 20:21:49 -03003623 break;
3624 }
Andre Guedes15819a72014-02-03 13:56:18 -03003625
Johan Hedberg851efca2014-07-02 22:42:00 +03003626 params->auto_connect = auto_connect;
3627
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003628 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3629 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003630
3631 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003632}
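
/* Illustrative sketch, not part of the original file: marking an LE
 * identity address for automatic background connection, which places
 * it on hdev->pend_le_conns and refreshes the background scan.
 */
static int example_enable_auto_connect(struct hci_dev *hdev,
				       bdaddr_t *addr, u8 addr_type)
{
	int err;

	/* hci_conn_params_set() requires hdev->lock to be held */
	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, addr_type,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}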
3633
3634/* This function requires the caller holds hdev->lock */
3635void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3636{
3637 struct hci_conn_params *params;
3638
3639 params = hci_conn_params_lookup(hdev, addr, addr_type);
3640 if (!params)
3641 return;
3642
Johan Hedberg95305ba2014-07-04 12:37:21 +03003643 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003644 list_del(&params->list);
3645 kfree(params);
3646
Johan Hedberg95305ba2014-07-04 12:37:21 +03003647 hci_update_background_scan(hdev);
3648
Andre Guedes15819a72014-02-03 13:56:18 -03003649 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3650}
3651
3652/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003653void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3654{
3655 struct hci_conn_params *params, *tmp;
3656
3657 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3658 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3659 continue;
3660 list_del(&params->list);
3661 kfree(params);
3662 }
3663
3664 BT_DBG("All LE disabled connection parameters were removed");
3665}
3666
3667/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003668void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003669{
3670 struct hci_conn_params *params, *tmp;
3671
3672 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003673 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003674 list_del(&params->list);
3675 kfree(params);
3676 }
3677
Johan Hedberga2f41a82014-07-04 12:37:19 +03003678 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003679
Andre Guedes15819a72014-02-03 13:56:18 -03003680 BT_DBG("All LE connection parameters were removed");
3681}
3682
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003683static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003684{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003685 if (status) {
3686 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003687
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003688 hci_dev_lock(hdev);
3689 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3690 hci_dev_unlock(hdev);
3691 return;
3692 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003693}
3694
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003695static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003696{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003697 /* General inquiry access code (GIAC) */
3698 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3699 struct hci_request req;
3700 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003701 int err;
3702
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003703 if (status) {
3704 BT_ERR("Failed to disable LE scanning: status %d", status);
3705 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003706 }
3707
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003708 switch (hdev->discovery.type) {
3709 case DISCOV_TYPE_LE:
3710 hci_dev_lock(hdev);
3711 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3712 hci_dev_unlock(hdev);
3713 break;
3714
3715 case DISCOV_TYPE_INTERLEAVED:
3716 hci_req_init(&req, hdev);
3717
3718 memset(&cp, 0, sizeof(cp));
3719 memcpy(&cp.lap, lap, sizeof(cp.lap));
3720 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3721 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3722
3723 hci_dev_lock(hdev);
3724
3725 hci_inquiry_cache_flush(hdev);
3726
3727 err = hci_req_run(&req, inquiry_complete);
3728 if (err) {
3729 BT_ERR("Inquiry request failed: err %d", err);
3730 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3731 }
3732
3733 hci_dev_unlock(hdev);
3734 break;
3735 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003736}
3737
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003738static void le_scan_disable_work(struct work_struct *work)
3739{
3740 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003741 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003742 struct hci_request req;
3743 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003744
3745 BT_DBG("%s", hdev->name);
3746
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003747 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003748
Andre Guedesb1efcc22014-02-26 20:21:40 -03003749 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003750
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003751 err = hci_req_run(&req, le_scan_disable_work_complete);
3752 if (err)
3753 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003754}
3755
Johan Hedberg8d972502014-02-28 12:54:14 +02003756static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3757{
3758 struct hci_dev *hdev = req->hdev;
3759
3760 /* If we're advertising or initiating an LE connection we can't
3761 * go ahead and change the random address at this time. This is
3762 * because the eventual initiator address used for the
3763 * subsequently created connection will be undefined (some
3764 * controllers use the new address and others the one we had
3765 * when the operation started).
3766 *
3767 * In this kind of scenario skip the update and let the random
3768 * address be updated at the next cycle.
3769 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003770 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003771 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3772 BT_DBG("Deferring random address update");
3773 return;
3774 }
3775
3776 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3777}
3778
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003779int hci_update_random_address(struct hci_request *req, bool require_privacy,
3780 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003781{
3782 struct hci_dev *hdev = req->hdev;
3783 int err;
3784
3785	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003786	 * the current RPA has expired or something other than the
3787	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003788 */
3789 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003790 int to;
3791
3792 *own_addr_type = ADDR_LE_DEV_RANDOM;
3793
3794 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003795 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003796 return 0;
3797
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003798 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003799 if (err < 0) {
3800 BT_ERR("%s failed to generate new RPA", hdev->name);
3801 return err;
3802 }
3803
Johan Hedberg8d972502014-02-28 12:54:14 +02003804 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003805
3806 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3807 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3808
3809 return 0;
3810 }
3811
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003812	/* If privacy is required but no resolvable private address is
3813	 * available, use an unresolvable private address. This is useful
3814	 * for active scanning and non-connectable advertising.
3815 */
3816 if (require_privacy) {
3817 bdaddr_t urpa;
3818
3819 get_random_bytes(&urpa, 6);
3820 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3821
3822 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003823 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003824 return 0;
3825 }
3826
Johan Hedbergebd3a742014-02-23 19:42:21 +02003827 /* If forcing static address is in use or there is no public
3828	 * address, use the static address as random address (but skip
3829	 * the HCI command if the current random address is already the
3830	 * static one).
3831 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003832 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003833 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3834 *own_addr_type = ADDR_LE_DEV_RANDOM;
3835 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3836 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3837 &hdev->static_addr);
3838 return 0;
3839 }
3840
3841 /* Neither privacy nor static address is being used so use a
3842 * public address.
3843 */
3844 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3845
3846 return 0;
3847}
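
/* Illustrative sketch, not part of the original file: selecting the
 * own-address type while building an HCI request, as advertising or
 * scanning setup code would. The rest of the request is elided.
 */
static int example_pick_own_address(struct hci_request *req)
{
	u8 own_addr_type;

	/* Privacy is not strictly required here, so the helper may pick
	 * an RPA, the static address or the public address.
	 */
	return hci_update_random_address(req, false, &own_addr_type);
}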
3848
Johan Hedberga1f4c312014-02-27 14:05:41 +02003849/* Copy the Identity Address of the controller.
3850 *
3851 * If the controller has a public BD_ADDR, then by default use that one.
3852 * If this is an LE-only controller without a public address, default to
3853 * the static random address.
3854 *
3855 * For debugging purposes it is possible to force controllers with a
3856 * public address to use the static random address instead.
3857 */
3858void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3859 u8 *bdaddr_type)
3860{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003861 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003862 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3863 bacpy(bdaddr, &hdev->static_addr);
3864 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3865 } else {
3866 bacpy(bdaddr, &hdev->bdaddr);
3867 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3868 }
3869}
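
/* Illustrative sketch, not part of the original file: reading back the
 * identity address selected by the rules documented above.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}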
3870
David Herrmann9be0dab2012-04-22 14:39:57 +02003871/* Alloc HCI device */
3872struct hci_dev *hci_alloc_dev(void)
3873{
3874 struct hci_dev *hdev;
3875
3876 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3877 if (!hdev)
3878 return NULL;
3879
David Herrmannb1b813d2012-04-22 14:39:58 +02003880 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3881 hdev->esco_type = (ESCO_HV1);
3882 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003883 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3884 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003885 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003886 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3887 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003888
David Herrmannb1b813d2012-04-22 14:39:58 +02003889 hdev->sniff_max_interval = 800;
3890 hdev->sniff_min_interval = 80;
3891
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003892 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003893 hdev->le_scan_interval = 0x0060;
3894 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003895 hdev->le_conn_min_interval = 0x0028;
3896 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003897 hdev->le_conn_latency = 0x0000;
3898 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003899
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003900 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003901 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003902 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3903 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003904
David Herrmannb1b813d2012-04-22 14:39:58 +02003905 mutex_init(&hdev->lock);
3906 mutex_init(&hdev->req_lock);
3907
3908 INIT_LIST_HEAD(&hdev->mgmt_pending);
3909 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003910 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003911 INIT_LIST_HEAD(&hdev->uuids);
3912 INIT_LIST_HEAD(&hdev->link_keys);
3913 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003914 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003915 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003916 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003917 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003918 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003919 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003920 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003921
3922 INIT_WORK(&hdev->rx_work, hci_rx_work);
3923 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3924 INIT_WORK(&hdev->tx_work, hci_tx_work);
3925 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003926
David Herrmannb1b813d2012-04-22 14:39:58 +02003927 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3928 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3929 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3930
David Herrmannb1b813d2012-04-22 14:39:58 +02003931 skb_queue_head_init(&hdev->rx_q);
3932 skb_queue_head_init(&hdev->cmd_q);
3933 skb_queue_head_init(&hdev->raw_q);
3934
3935 init_waitqueue_head(&hdev->req_wait_q);
3936
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003937 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003938
David Herrmannb1b813d2012-04-22 14:39:58 +02003939 hci_init_sysfs(hdev);
3940 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003941
3942 return hdev;
3943}
3944EXPORT_SYMBOL(hci_alloc_dev);
3945
3946/* Free HCI device */
3947void hci_free_dev(struct hci_dev *hdev)
3948{
David Herrmann9be0dab2012-04-22 14:39:57 +02003949 /* will free via device release */
3950 put_device(&hdev->dev);
3951}
3952EXPORT_SYMBOL(hci_free_dev);
3953
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954/* Register HCI device */
3955int hci_register_dev(struct hci_dev *hdev)
3956{
David Herrmannb1b813d2012-04-22 14:39:58 +02003957 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958
Marcel Holtmann74292d52014-07-06 15:50:27 +02003959 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 return -EINVAL;
3961
Mat Martineau08add512011-11-02 16:18:36 -07003962 /* Do not allow HCI_AMP devices to register at index 0,
3963 * so the index can be used as the AMP controller ID.
3964 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003965 switch (hdev->dev_type) {
3966 case HCI_BREDR:
3967 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3968 break;
3969 case HCI_AMP:
3970 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3971 break;
3972 default:
3973 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003975
Sasha Levin3df92b32012-05-27 22:36:56 +02003976 if (id < 0)
3977 return id;
3978
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 sprintf(hdev->name, "hci%d", id);
3980 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003981
3982 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3983
Kees Cookd8537542013-07-03 15:04:57 -07003984 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3985 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003986 if (!hdev->workqueue) {
3987 error = -ENOMEM;
3988 goto err;
3989 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003990
Kees Cookd8537542013-07-03 15:04:57 -07003991 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3992 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003993 if (!hdev->req_workqueue) {
3994 destroy_workqueue(hdev->workqueue);
3995 error = -ENOMEM;
3996 goto err;
3997 }
3998
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003999 if (!IS_ERR_OR_NULL(bt_debugfs))
4000 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4001
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004002 dev_set_name(&hdev->dev, "%s", hdev->name);
4003
Johan Hedberg99780a72014-02-18 10:40:07 +02004004 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4005 CRYPTO_ALG_ASYNC);
4006 if (IS_ERR(hdev->tfm_aes)) {
4007 BT_ERR("Unable to create crypto context");
4008 error = PTR_ERR(hdev->tfm_aes);
4009 hdev->tfm_aes = NULL;
4010 goto err_wqueue;
4011 }
4012
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004013 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004014 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004015 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004017 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004018 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4019 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004020 if (hdev->rfkill) {
4021 if (rfkill_register(hdev->rfkill) < 0) {
4022 rfkill_destroy(hdev->rfkill);
4023 hdev->rfkill = NULL;
4024 }
4025 }
4026
Johan Hedberg5e130362013-09-13 08:58:17 +03004027 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4028 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4029
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004030 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004031 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004032
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004033 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004034 /* Assume BR/EDR support until proven otherwise (such as
4035		 * through reading supported features during init).
4036 */
4037 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4038 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004039
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004040 write_lock(&hci_dev_list_lock);
4041 list_add(&hdev->list, &hci_dev_list);
4042 write_unlock(&hci_dev_list_lock);
4043
Marcel Holtmann4a964402014-07-02 19:10:33 +02004044 /* Devices that are marked for raw-only usage are unconfigured
4045 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004046 */
4047 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004048 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004049
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004051 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052
Johan Hedberg19202572013-01-14 22:33:51 +02004053 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004054
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004056
Johan Hedberg99780a72014-02-18 10:40:07 +02004057err_tfm:
4058 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004059err_wqueue:
4060 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004061 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004062err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004063 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004064
David Herrmann33ca9542011-10-08 14:58:49 +02004065 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066}
4067EXPORT_SYMBOL(hci_register_dev);
4068
4069/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004070void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071{
Sasha Levin3df92b32012-05-27 22:36:56 +02004072 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004073
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004074 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075
Johan Hovold94324962012-03-15 14:48:41 +01004076 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4077
Sasha Levin3df92b32012-05-27 22:36:56 +02004078 id = hdev->id;
4079
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004080 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004082 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083
4084 hci_dev_do_close(hdev);
4085
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304086 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004087 kfree_skb(hdev->reassembly[i]);
4088
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004089 cancel_work_sync(&hdev->power_on);
4090
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004091 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004092 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4093 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004094 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004095 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004096 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004097 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004098
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004099 /* mgmt_index_removed should take care of emptying the
4100 * pending list */
4101 BUG_ON(!list_empty(&hdev->mgmt_pending));
4102
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 hci_notify(hdev, HCI_DEV_UNREG);
4104
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004105 if (hdev->rfkill) {
4106 rfkill_unregister(hdev->rfkill);
4107 rfkill_destroy(hdev->rfkill);
4108 }
4109
Johan Hedberg99780a72014-02-18 10:40:07 +02004110 if (hdev->tfm_aes)
4111 crypto_free_blkcipher(hdev->tfm_aes);
4112
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004113 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004114
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004115 debugfs_remove_recursive(hdev->debugfs);
4116
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004117 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004118 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004119
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004120 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004121 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004122 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004123 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004124 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004125 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004126 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004127 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004128 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004129 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004130 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004131
David Herrmanndc946bd2012-01-07 15:47:24 +01004132 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004133
4134 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135}
4136EXPORT_SYMBOL(hci_unregister_dev);
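
/*
 * Illustrative teardown sketch (an assumption about the typical driver
 * removal path, pairing this function with hci_free_dev() from the
 * same API):
 *
 *    hci_unregister_dev(hdev);
 *    hci_free_dev(hdev);
 */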
4137
4138/* Suspend HCI device */
4139int hci_suspend_dev(struct hci_dev *hdev)
4140{
4141 hci_notify(hdev, HCI_DEV_SUSPEND);
4142 return 0;
4143}
4144EXPORT_SYMBOL(hci_suspend_dev);
4145
4146/* Resume HCI device */
4147int hci_resume_dev(struct hci_dev *hdev)
4148{
4149 hci_notify(hdev, HCI_DEV_RESUME);
4150 return 0;
4151}
4152EXPORT_SYMBOL(hci_resume_dev);
4153
Marcel Holtmann76bca882009-11-18 00:40:39 +01004154/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004155int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004156{
 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
 !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004159 kfree_skb(skb);
4160 return -ENXIO;
4161 }
4162
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004163 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004164 bt_cb(skb)->incoming = 1;
4165
4166 /* Time stamp */
4167 __net_timestamp(skb);
4168
Marcel Holtmann76bca882009-11-18 00:40:39 +01004169 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004170 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004171
Marcel Holtmann76bca882009-11-18 00:40:39 +01004172 return 0;
4173}
4174EXPORT_SYMBOL(hci_recv_frame);
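
/*
 * Illustrative sketch (assumed driver-side usage): a transport driver
 * that has a complete packet in an skb hands it to the core roughly
 * like this:
 *
 *    bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *    err = hci_recv_frame(hdev, skb);
 *
 * A negative return means the frame was rejected; the skb has already
 * been freed in that case, so the caller must not free it again.
 */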
4175
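/* Reassemble a packet of the given type. hdev->reassembly[index] holds
 * the partially received skb; input bytes are copied in until the
 * header announces a complete frame, which is then handed to
 * hci_recv_frame(). Returns the number of input bytes not consumed,
 * or a negative error.
 */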
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304176static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004177 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304178{
4179 int len = 0;
4180 int hlen = 0;
4181 int remain = count;
4182 struct sk_buff *skb;
4183 struct bt_skb_cb *scb;
4184
4185 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004186 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304187 return -EILSEQ;
4188
4189 skb = hdev->reassembly[index];
4190
4191 if (!skb) {
4192 switch (type) {
4193 case HCI_ACLDATA_PKT:
4194 len = HCI_MAX_FRAME_SIZE;
4195 hlen = HCI_ACL_HDR_SIZE;
4196 break;
4197 case HCI_EVENT_PKT:
4198 len = HCI_MAX_EVENT_SIZE;
4199 hlen = HCI_EVENT_HDR_SIZE;
4200 break;
4201 case HCI_SCODATA_PKT:
4202 len = HCI_MAX_SCO_SIZE;
4203 hlen = HCI_SCO_HDR_SIZE;
4204 break;
4205 }
4206
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004207 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304208 if (!skb)
4209 return -ENOMEM;
4210
4211 scb = (void *) skb->cb;
4212 scb->expect = hlen;
4213 scb->pkt_type = type;
4214
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304215 hdev->reassembly[index] = skb;
4216 }
4217
4218 while (count) {
4219 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004220 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304221
4222 memcpy(skb_put(skb, len), data, len);
4223
4224 count -= len;
4225 data += len;
4226 scb->expect -= len;
4227 remain = count;
4228
4229 switch (type) {
4230 case HCI_EVENT_PKT:
4231 if (skb->len == HCI_EVENT_HDR_SIZE) {
4232 struct hci_event_hdr *h = hci_event_hdr(skb);
4233 scb->expect = h->plen;
4234
4235 if (skb_tailroom(skb) < scb->expect) {
4236 kfree_skb(skb);
4237 hdev->reassembly[index] = NULL;
4238 return -ENOMEM;
4239 }
4240 }
4241 break;
4242
4243 case HCI_ACLDATA_PKT:
4244 if (skb->len == HCI_ACL_HDR_SIZE) {
4245 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4246 scb->expect = __le16_to_cpu(h->dlen);
4247
4248 if (skb_tailroom(skb) < scb->expect) {
4249 kfree_skb(skb);
4250 hdev->reassembly[index] = NULL;
4251 return -ENOMEM;
4252 }
4253 }
4254 break;
4255
4256 case HCI_SCODATA_PKT:
4257 if (skb->len == HCI_SCO_HDR_SIZE) {
4258 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4259 scb->expect = h->dlen;
4260
4261 if (skb_tailroom(skb) < scb->expect) {
4262 kfree_skb(skb);
4263 hdev->reassembly[index] = NULL;
4264 return -ENOMEM;
4265 }
4266 }
4267 break;
4268 }
4269
4270 if (scb->expect == 0) {
4271 /* Complete frame */
4272
4273 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004274 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304275
4276 hdev->reassembly[index] = NULL;
4277 return remain;
4278 }
4279 }
4280
4281 return remain;
4282}
4283
Marcel Holtmannef222012007-07-11 06:42:04 +02004284int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4285{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304286 int rem = 0;
4287
Marcel Holtmannef222012007-07-11 06:42:04 +02004288 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4289 return -EILSEQ;
4290
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004291 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004292 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304293 if (rem < 0)
4294 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004295
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304296 data += (count - rem);
4297 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004298 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004299
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304300 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004301}
4302EXPORT_SYMBOL(hci_recv_fragment);
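
/*
 * Illustrative sketch (assumed usage): drivers that receive typed
 * packet chunks can feed them in as they arrive and let the core
 * buffer partial packets:
 *
 *    err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *    if (err < 0)
 *            BT_ERR("%s reassembly failed (%d)", hdev->name, err);
 */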
4303
Suraj Sumangala99811512010-07-14 13:02:19 +05304304#define STREAM_REASSEMBLY 0
4305
4306int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4307{
4308 int type;
4309 int rem = 0;
4310
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004311 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304312 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4313
4314 if (!skb) {
4315 struct { char type; } *pkt;
4316
4317 /* Start of the frame */
4318 pkt = data;
4319 type = pkt->type;
4320
4321 data++;
4322 count--;
4323 } else
4324 type = bt_cb(skb)->pkt_type;
4325
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004326 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004327 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304328 if (rem < 0)
4329 return rem;
4330
4331 data += (count - rem);
4332 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004333 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304334
4335 return rem;
4336}
4337EXPORT_SYMBOL(hci_recv_stream_fragment);
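
/*
 * Illustrative sketch (assumed usage): UART-style drivers that see a
 * raw byte stream, where each packet is prefixed by its type octet,
 * can pass bytes straight from their receive path:
 *
 *    err = hci_recv_stream_fragment(hdev, data, count);
 *    if (err < 0)
 *            BT_ERR("%s stream corrupted (%d)", hdev->name, err);
 */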
4338
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339/* ---- Interface to upper protocols ---- */
4340
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341int hci_register_cb(struct hci_cb *cb)
4342{
4343 BT_DBG("%p name %s", cb, cb->name);
4344
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004345 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004347 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348
4349 return 0;
4350}
4351EXPORT_SYMBOL(hci_register_cb);
4352
4353int hci_unregister_cb(struct hci_cb *cb)
4354{
4355 BT_DBG("%p name %s", cb, cb->name);
4356
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004357 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004359 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360
4361 return 0;
4362}
4363EXPORT_SYMBOL(hci_unregister_cb);
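
/*
 * Minimal registration sketch (illustrative; the callback field names
 * are assumptions based on this kernel's struct hci_cb):
 *
 *    static struct hci_cb my_proto_cb = {
 *            .name         = "my_proto",
 *            .security_cfm = my_proto_security_cfm,
 *    };
 *
 *    hci_register_cb(&my_proto_cb);
 *    ...
 *    hci_unregister_cb(&my_proto_cb);
 */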
4364
Marcel Holtmann51086992013-10-10 14:54:19 -07004365static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004367 int err;
4368
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004369 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004371 /* Time stamp */
4372 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004374 /* Send copy to monitor */
4375 hci_send_to_monitor(hdev, skb);
4376
4377 if (atomic_read(&hdev->promisc)) {
4378 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004379 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 }
4381
 /* Get rid of the skb owner prior to sending to the driver. */
4383 skb_orphan(skb);
4384
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004385 err = hdev->send(hdev, skb);
4386 if (err < 0) {
4387 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4388 kfree_skb(skb);
4389 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390}
4391
Johan Hedberg3119ae92013-03-05 20:37:44 +02004392void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4393{
4394 skb_queue_head_init(&req->cmd_q);
4395 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004396 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004397}
4398
4399int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4400{
4401 struct hci_dev *hdev = req->hdev;
4402 struct sk_buff *skb;
4403 unsigned long flags;
4404
4405 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4406
 /* If an error occurred during request building, remove all HCI
4408 * commands queued on the HCI request queue.
4409 */
4410 if (req->err) {
4411 skb_queue_purge(&req->cmd_q);
4412 return req->err;
4413 }
4414
Johan Hedberg3119ae92013-03-05 20:37:44 +02004415 /* Do not allow empty requests */
4416 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004417 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004418
4419 skb = skb_peek_tail(&req->cmd_q);
4420 bt_cb(skb)->req.complete = complete;
4421
4422 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4423 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4424 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4425
4426 queue_work(hdev->workqueue, &hdev->cmd_work);
4427
4428 return 0;
4429}
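
/*
 * Usage sketch for the request API (illustrative): queue one or more
 * commands and submit them as a single unit; the complete callback
 * runs once for the whole request. my_complete_cb is a placeholder
 * matching hci_req_complete_t:
 *
 *    struct hci_request req;
 *
 *    hci_req_init(&req, hdev);
 *    hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *    err = hci_req_run(&req, my_complete_cb);
 */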
4430
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004431static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004432 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433{
4434 int len = HCI_COMMAND_HDR_SIZE + plen;
4435 struct hci_command_hdr *hdr;
4436 struct sk_buff *skb;
4437
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004439 if (!skb)
4440 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441
4442 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004443 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444 hdr->plen = plen;
4445
4446 if (plen)
4447 memcpy(skb_put(skb, plen), param, plen);
4448
4449 BT_DBG("skb len %d", skb->len);
4450
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004451 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004452
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004453 return skb;
4454}
4455
4456/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004457int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4458 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004459{
4460 struct sk_buff *skb;
4461
4462 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4463
4464 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4465 if (!skb) {
4466 BT_ERR("%s no memory for command", hdev->name);
4467 return -ENOMEM;
4468 }
4469
 /* Stand-alone HCI commands must be flagged as
4471 * single-command requests.
4472 */
4473 bt_cb(skb)->req.start = true;
4474
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004476 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477
4478 return 0;
4479}
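
/*
 * Illustrative call (HCI_OP_RESET carries no parameters):
 *
 *    hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */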
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480
Johan Hedberg71c76a12013-03-05 20:37:46 +02004481/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004482void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4483 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004484{
4485 struct hci_dev *hdev = req->hdev;
4486 struct sk_buff *skb;
4487
4488 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4489
 /* If an error occurred during request building, there is no point in
4491 * queueing the HCI command. We can simply return.
4492 */
4493 if (req->err)
4494 return;
4495
Johan Hedberg71c76a12013-03-05 20:37:46 +02004496 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4497 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004498 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4499 hdev->name, opcode);
4500 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004501 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004502 }
4503
4504 if (skb_queue_empty(&req->cmd_q))
4505 bt_cb(skb)->req.start = true;
4506
Johan Hedberg02350a72013-04-03 21:50:29 +03004507 bt_cb(skb)->req.event = event;
4508
Johan Hedberg71c76a12013-03-05 20:37:46 +02004509 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004510}
4511
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004512void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4513 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004514{
4515 hci_req_add_ev(req, opcode, plen, param, 0);
4516}
4517
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004519void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520{
4521 struct hci_command_hdr *hdr;
4522
4523 if (!hdev->sent_cmd)
4524 return NULL;
4525
4526 hdr = (void *) hdev->sent_cmd->data;
4527
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004528 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529 return NULL;
4530
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004531 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004532
4533 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4534}
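
/*
 * Illustrative use (assumed caller: a command complete handler that
 * wants the parameters it originally sent):
 *
 *    struct hci_cp_le_set_scan_enable *cp;
 *
 *    cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *    if (!cp)
 *            return;
 */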
4535
4536/* Send ACL data */
4537static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4538{
4539 struct hci_acl_hdr *hdr;
4540 int len = skb->len;
4541
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004542 skb_push(skb, HCI_ACL_HDR_SIZE);
4543 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004544 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004545 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4546 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547}
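
/*
 * Worked example: hci_handle_pack() folds the packet boundary and
 * broadcast flags into the four bits above the 12-bit connection
 * handle, so handle 0x0001 sent with ACL_START (0x02) flags goes on
 * the wire as 0x2001.
 */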
4548
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004549static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004550 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004552 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553 struct hci_dev *hdev = conn->hdev;
4554 struct sk_buff *list;
4555
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004556 skb->len = skb_headlen(skb);
4557 skb->data_len = 0;
4558
4559 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004560
4561 switch (hdev->dev_type) {
4562 case HCI_BREDR:
4563 hci_add_acl_hdr(skb, conn->handle, flags);
4564 break;
4565 case HCI_AMP:
4566 hci_add_acl_hdr(skb, chan->handle, flags);
4567 break;
4568 default:
4569 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4570 return;
4571 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004572
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004573 list = skb_shinfo(skb)->frag_list;
4574 if (!list) {
 /* Non-fragmented */
4576 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4577
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004578 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579 } else {
4580 /* Fragmented */
4581 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4582
4583 skb_shinfo(skb)->frag_list = NULL;
4584
4585 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004586 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004588 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004589
4590 flags &= ~ACL_START;
4591 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592 do {
 skb = list;
 list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004594
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004595 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004596 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597
4598 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4599
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004600 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 } while (list);
4602
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004603 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004605}
4606
4607void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4608{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004609 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004610
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004611 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004612
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004613 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004615 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617
4618/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004619void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620{
4621 struct hci_dev *hdev = conn->hdev;
4622 struct hci_sco_hdr hdr;
4623
4624 BT_DBG("%s len %d", hdev->name, skb->len);
4625
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004626 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 hdr.dlen = skb->len;
4628
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004629 skb_push(skb, HCI_SCO_HDR_SIZE);
4630 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004631 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004633 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004634
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004636 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638
4639/* ---- HCI TX task (outgoing data) ---- */
4640
4641/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004642static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4643 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644{
4645 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004646 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004647 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648
 /* We don't have to lock the device here. Connections are always
 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004651
4652 rcu_read_lock();
4653
4654 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004655 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004657
4658 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4659 continue;
4660
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 num++;
4662
4663 if (c->sent < min) {
4664 min = c->sent;
4665 conn = c;
4666 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004667
4668 if (hci_conn_num(hdev, type) == num)
4669 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 }
4671
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004672 rcu_read_unlock();
4673
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004675 int cnt, q;
4676
4677 switch (conn->type) {
4678 case ACL_LINK:
4679 cnt = hdev->acl_cnt;
4680 break;
4681 case SCO_LINK:
4682 case ESCO_LINK:
4683 cnt = hdev->sco_cnt;
4684 break;
4685 case LE_LINK:
4686 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4687 break;
4688 default:
4689 cnt = 0;
4690 BT_ERR("Unknown link type");
4691 }
4692
4693 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 *quote = q ? q : 1;
 } else {
 *quote = 0;
 }
4697
4698 BT_DBG("conn %p quote %d", conn, *quote);
4699 return conn;
4700}
4701
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004702static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703{
4704 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004705 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706
Ville Tervobae1f5d92011-02-10 22:38:53 -03004707 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004709 rcu_read_lock();
4710
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004712 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004713 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004714 BT_ERR("%s killing stalled connection %pMR",
4715 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004716 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 }
4718 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004719
4720 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721}
4722
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004723static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4724 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004725{
4726 struct hci_conn_hash *h = &hdev->conn_hash;
4727 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004728 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004729 struct hci_conn *conn;
4730 int cnt, q, conn_num = 0;
4731
4732 BT_DBG("%s", hdev->name);
4733
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004734 rcu_read_lock();
4735
4736 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004737 struct hci_chan *tmp;
4738
4739 if (conn->type != type)
4740 continue;
4741
4742 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4743 continue;
4744
4745 conn_num++;
4746
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004747 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004748 struct sk_buff *skb;
4749
4750 if (skb_queue_empty(&tmp->data_q))
4751 continue;
4752
4753 skb = skb_peek(&tmp->data_q);
4754 if (skb->priority < cur_prio)
4755 continue;
4756
4757 if (skb->priority > cur_prio) {
4758 num = 0;
4759 min = ~0;
4760 cur_prio = skb->priority;
4761 }
4762
4763 num++;
4764
4765 if (conn->sent < min) {
4766 min = conn->sent;
4767 chan = tmp;
4768 }
4769 }
4770
4771 if (hci_conn_num(hdev, type) == conn_num)
4772 break;
4773 }
4774
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004775 rcu_read_unlock();
4776
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004777 if (!chan)
4778 return NULL;
4779
4780 switch (chan->conn->type) {
4781 case ACL_LINK:
4782 cnt = hdev->acl_cnt;
4783 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004784 case AMP_LINK:
4785 cnt = hdev->block_cnt;
4786 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004787 case SCO_LINK:
4788 case ESCO_LINK:
4789 cnt = hdev->sco_cnt;
4790 break;
4791 case LE_LINK:
4792 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4793 break;
4794 default:
4795 cnt = 0;
4796 BT_ERR("Unknown link type");
4797 }
4798
4799 q = cnt / num;
4800 *quote = q ? q : 1;
4801 BT_DBG("chan %p quote %d", chan, *quote);
4802 return chan;
4803}
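
/*
 * Worked example for the quota above: with 12 free slots (cnt) and
 * 3 channels competing at the same priority (num), each pass hands
 * out a quote of 12 / 3 = 4 packets; a zero quotient is rounded up
 * to 1 so a busy link can never be starved completely.
 */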
4804
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004805static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4806{
4807 struct hci_conn_hash *h = &hdev->conn_hash;
4808 struct hci_conn *conn;
4809 int num = 0;
4810
4811 BT_DBG("%s", hdev->name);
4812
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004813 rcu_read_lock();
4814
4815 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004816 struct hci_chan *chan;
4817
4818 if (conn->type != type)
4819 continue;
4820
4821 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4822 continue;
4823
4824 num++;
4825
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004826 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004827 struct sk_buff *skb;
4828
4829 if (chan->sent) {
4830 chan->sent = 0;
4831 continue;
4832 }
4833
4834 if (skb_queue_empty(&chan->data_q))
4835 continue;
4836
4837 skb = skb_peek(&chan->data_q);
4838 if (skb->priority >= HCI_PRIO_MAX - 1)
4839 continue;
4840
4841 skb->priority = HCI_PRIO_MAX - 1;
4842
4843 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004844 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004845 }
4846
4847 if (hci_conn_num(hdev, type) == num)
4848 break;
4849 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004850
4851 rcu_read_unlock();
4852
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004853}
4854
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004855static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4856{
4857 /* Calculate count of blocks used by this packet */
4858 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4859}
4860
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004861static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004863 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
 /* ACL tx timeout must be longer than the maximum
 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004866 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004867 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004868 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004870}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004872static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004873{
4874 unsigned int cnt = hdev->acl_cnt;
4875 struct hci_chan *chan;
4876 struct sk_buff *skb;
4877 int quote;
4878
4879 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004880
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004881 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004882 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004883 u32 priority = (skb_peek(&chan->data_q))->priority;
4884 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004886 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004887
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004888 /* Stop if priority has changed */
4889 if (skb->priority < priority)
4890 break;
4891
4892 skb = skb_dequeue(&chan->data_q);
4893
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004894 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004895 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004896
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004897 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 hdev->acl_last_tx = jiffies;
4899
4900 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004901 chan->sent++;
4902 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 }
4904 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004905
4906 if (cnt != hdev->acl_cnt)
4907 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908}
4909
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004910static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004911{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004912 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004913 struct hci_chan *chan;
4914 struct sk_buff *skb;
4915 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004916 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004917
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004918 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004919
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004920 BT_DBG("%s", hdev->name);
4921
4922 if (hdev->dev_type == HCI_AMP)
4923 type = AMP_LINK;
4924 else
4925 type = ACL_LINK;
4926
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004927 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004928 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004929 u32 priority = (skb_peek(&chan->data_q))->priority;
4930 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4931 int blocks;
4932
4933 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004934 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004935
4936 /* Stop if priority has changed */
4937 if (skb->priority < priority)
4938 break;
4939
4940 skb = skb_dequeue(&chan->data_q);
4941
4942 blocks = __get_blocks(hdev, skb);
4943 if (blocks > hdev->block_cnt)
4944 return;
4945
4946 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004947 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004948
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004949 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004950 hdev->acl_last_tx = jiffies;
4951
4952 hdev->block_cnt -= blocks;
4953 quote -= blocks;
4954
4955 chan->sent += blocks;
4956 chan->conn->sent += blocks;
4957 }
4958 }
4959
4960 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004961 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004962}
4963
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004964static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004965{
4966 BT_DBG("%s", hdev->name);
4967
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004968 /* No ACL link over BR/EDR controller */
4969 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4970 return;
4971
4972 /* No AMP link over AMP controller */
4973 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004974 return;
4975
4976 switch (hdev->flow_ctl_mode) {
4977 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4978 hci_sched_acl_pkt(hdev);
4979 break;
4980
4981 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4982 hci_sched_acl_blk(hdev);
4983 break;
4984 }
4985}
4986
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004988static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989{
4990 struct hci_conn *conn;
4991 struct sk_buff *skb;
4992 int quote;
4993
4994 BT_DBG("%s", hdev->name);
4995
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004996 if (!hci_conn_num(hdev, SCO_LINK))
4997 return;
4998
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5000 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5001 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005002 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003
5004 conn->sent++;
5005 if (conn->sent == ~0)
5006 conn->sent = 0;
5007 }
5008 }
5009}
5010
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005011static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005012{
5013 struct hci_conn *conn;
5014 struct sk_buff *skb;
5015 int quote;
5016
5017 BT_DBG("%s", hdev->name);
5018
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005019 if (!hci_conn_num(hdev, ESCO_LINK))
5020 return;
5021
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005022 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5023 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005024 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5025 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005026 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005027
5028 conn->sent++;
5029 if (conn->sent == ~0)
5030 conn->sent = 0;
5031 }
5032 }
5033}
5034
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005035static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005036{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005037 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005038 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005039 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005040
5041 BT_DBG("%s", hdev->name);
5042
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005043 if (!hci_conn_num(hdev, LE_LINK))
5044 return;
5045
Marcel Holtmann4a964402014-07-02 19:10:33 +02005046 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
 /* LE tx timeout must be longer than the maximum
 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005049 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005050 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005051 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005052 }
5053
5054 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005055 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005056 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005057 u32 priority = (skb_peek(&chan->data_q))->priority;
5058 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005059 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005060 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005061
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005062 /* Stop if priority has changed */
5063 if (skb->priority < priority)
5064 break;
5065
5066 skb = skb_dequeue(&chan->data_q);
5067
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005068 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005069 hdev->le_last_tx = jiffies;
5070
5071 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005072 chan->sent++;
5073 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005074 }
5075 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005076
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005077 if (hdev->le_pkts)
5078 hdev->le_cnt = cnt;
5079 else
5080 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005081
5082 if (cnt != tmp)
5083 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005084}
5085
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005086static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005088 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005089 struct sk_buff *skb;
5090
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005091 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005092 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093
Marcel Holtmann52de5992013-09-03 18:08:38 -07005094 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5095 /* Schedule queues and send stuff to HCI driver */
5096 hci_sched_acl(hdev);
5097 hci_sched_sco(hdev);
5098 hci_sched_esco(hdev);
5099 hci_sched_le(hdev);
5100 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005101
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102 /* Send next queued raw (unknown type) packet */
5103 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005104 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105}
5106
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005107/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108
5109/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005110static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111{
5112 struct hci_acl_hdr *hdr = (void *) skb->data;
5113 struct hci_conn *conn;
5114 __u16 handle, flags;
5115
5116 skb_pull(skb, HCI_ACL_HDR_SIZE);
5117
5118 handle = __le16_to_cpu(hdr->handle);
5119 flags = hci_flags(handle);
5120 handle = hci_handle(handle);
5121
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005122 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005123 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124
5125 hdev->stat.acl_rx++;
5126
5127 hci_dev_lock(hdev);
5128 conn = hci_conn_hash_lookup_handle(hdev, handle);
5129 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005130
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005132 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005133
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005135 l2cap_recv_acldata(conn, skb, flags);
5136 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005138 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005139 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140 }
5141
5142 kfree_skb(skb);
5143}
5144
5145/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005146static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147{
5148 struct hci_sco_hdr *hdr = (void *) skb->data;
5149 struct hci_conn *conn;
5150 __u16 handle;
5151
5152 skb_pull(skb, HCI_SCO_HDR_SIZE);
5153
5154 handle = __le16_to_cpu(hdr->handle);
5155
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005156 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157
5158 hdev->stat.sco_rx++;
5159
5160 hci_dev_lock(hdev);
5161 conn = hci_conn_hash_lookup_handle(hdev, handle);
5162 hci_dev_unlock(hdev);
5163
5164 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005166 sco_recv_scodata(conn, skb);
5167 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005169 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005170 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 }
5172
5173 kfree_skb(skb);
5174}
5175
Johan Hedberg9238f362013-03-05 20:37:48 +02005176static bool hci_req_is_complete(struct hci_dev *hdev)
5177{
5178 struct sk_buff *skb;
5179
5180 skb = skb_peek(&hdev->cmd_q);
5181 if (!skb)
5182 return true;
5183
5184 return bt_cb(skb)->req.start;
5185}
5186
Johan Hedberg42c6b122013-03-05 20:37:49 +02005187static void hci_resend_last(struct hci_dev *hdev)
5188{
5189 struct hci_command_hdr *sent;
5190 struct sk_buff *skb;
5191 u16 opcode;
5192
5193 if (!hdev->sent_cmd)
5194 return;
5195
5196 sent = (void *) hdev->sent_cmd->data;
5197 opcode = __le16_to_cpu(sent->opcode);
5198 if (opcode == HCI_OP_RESET)
5199 return;
5200
5201 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5202 if (!skb)
5203 return;
5204
5205 skb_queue_head(&hdev->cmd_q, skb);
5206 queue_work(hdev->workqueue, &hdev->cmd_work);
5207}
5208
Johan Hedberg9238f362013-03-05 20:37:48 +02005209void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5210{
5211 hci_req_complete_t req_complete = NULL;
5212 struct sk_buff *skb;
5213 unsigned long flags;
5214
5215 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5216
Johan Hedberg42c6b122013-03-05 20:37:49 +02005217 /* If the completed command doesn't match the last one that was
 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005219 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005220 if (!hci_sent_cmd_data(hdev, opcode)) {
5221 /* Some CSR based controllers generate a spontaneous
5222 * reset complete event during init and any pending
5223 * command will never be completed. In such a case we
5224 * need to resend whatever was the last sent
5225 * command.
5226 */
5227 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5228 hci_resend_last(hdev);
5229
Johan Hedberg9238f362013-03-05 20:37:48 +02005230 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005231 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005232
 /* If the command succeeded and there are still more commands in
 * this request, the request is not yet complete.
5235 */
5236 if (!status && !hci_req_is_complete(hdev))
5237 return;
5238
 /* If this was the last command in a request, the complete
5240 * callback would be found in hdev->sent_cmd instead of the
5241 * command queue (hdev->cmd_q).
5242 */
5243 if (hdev->sent_cmd) {
5244 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005245
5246 if (req_complete) {
5247 /* We must set the complete callback to NULL to
5248 * avoid calling the callback more than once if
5249 * this function gets called again.
5250 */
5251 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5252
Johan Hedberg9238f362013-03-05 20:37:48 +02005253 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005254 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005255 }
5256
5257 /* Remove all pending commands belonging to this request */
5258 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5259 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5260 if (bt_cb(skb)->req.start) {
5261 __skb_queue_head(&hdev->cmd_q, skb);
5262 break;
5263 }
5264
5265 req_complete = bt_cb(skb)->req.complete;
5266 kfree_skb(skb);
5267 }
5268 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5269
5270call_complete:
5271 if (req_complete)
5272 req_complete(hdev, status);
5273}
5274
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005275static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005277 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278 struct sk_buff *skb;
5279
5280 BT_DBG("%s", hdev->name);
5281
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005283 /* Send copy to monitor */
5284 hci_send_to_monitor(hdev, skb);
5285
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 if (atomic_read(&hdev->promisc)) {
5287 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005288 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289 }
5290
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005291 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292 kfree_skb(skb);
5293 continue;
5294 }
5295
5296 if (test_bit(HCI_INIT, &hdev->flags)) {
 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005298 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 case HCI_ACLDATA_PKT:
5300 case HCI_SCODATA_PKT:
5301 kfree_skb(skb);
5302 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005303 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304 }
5305
5306 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005307 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005309 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310 hci_event_packet(hdev, skb);
5311 break;
5312
5313 case HCI_ACLDATA_PKT:
5314 BT_DBG("%s ACL data packet", hdev->name);
5315 hci_acldata_packet(hdev, skb);
5316 break;
5317
5318 case HCI_SCODATA_PKT:
5319 BT_DBG("%s SCO data packet", hdev->name);
5320 hci_scodata_packet(hdev, skb);
5321 break;
5322
5323 default:
5324 kfree_skb(skb);
5325 break;
5326 }
5327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328}
5329
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005330static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005332 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 struct sk_buff *skb;
5334
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005335 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5336 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005339 if (atomic_read(&hdev->cmd_cnt)) {
5340 skb = skb_dequeue(&hdev->cmd_q);
5341 if (!skb)
5342 return;
5343
Wei Yongjun7585b972009-02-25 18:29:52 +08005344 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005345
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005346 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005347 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005349 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005350 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005351 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005352 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005353 schedule_delayed_work(&hdev->cmd_timer,
5354 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005355 } else {
5356 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005357 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005358 }
5359 }
5360}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005361
5362void hci_req_add_le_scan_disable(struct hci_request *req)
5363{
5364 struct hci_cp_le_set_scan_enable cp;
5365
5366 memset(&cp, 0, sizeof(cp));
5367 cp.enable = LE_SCAN_DISABLE;
5368 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5369}
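
/*
 * Usage sketch (illustrative): disabling the LE scan as a one-off
 * request with no completion callback:
 *
 *    struct hci_request req;
 *
 *    hci_req_init(&req, hdev);
 *    hci_req_add_le_scan_disable(&req);
 *    hci_req_run(&req, NULL);
 */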
Andre Guedesa4790db2014-02-26 20:21:47 -03005370
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005371void hci_req_add_le_passive_scan(struct hci_request *req)
5372{
5373 struct hci_cp_le_set_scan_param param_cp;
5374 struct hci_cp_le_set_scan_enable enable_cp;
5375 struct hci_dev *hdev = req->hdev;
5376 u8 own_addr_type;
5377
 /* Set require_privacy to false since no SCAN_REQ is sent
5379 * during passive scanning. Not using an unresolvable address
5380 * here is important so that peer devices using direct
5381 * advertising with our address will be correctly reported
5382 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005383 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005384 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005385 return;
5386
5387 memset(&param_cp, 0, sizeof(param_cp));
5388 param_cp.type = LE_SCAN_PASSIVE;
5389 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5390 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5391 param_cp.own_address_type = own_addr_type;
5392 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5393 &param_cp);
5394
5395 memset(&enable_cp, 0, sizeof(enable_cp));
5396 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005397 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005398 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5399 &enable_cp);
5400}
5401
Andre Guedesa4790db2014-02-26 20:21:47 -03005402static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5403{
5404 if (status)
5405 BT_DBG("HCI request failed to update background scanning: "
5406 "status 0x%2.2x", status);
5407}
5408
5409/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
5411 * otherwise we stop it.
5412 *
5413 * This function requires the caller holds hdev->lock.
5414 */
5415void hci_update_background_scan(struct hci_dev *hdev)
5416{
Andre Guedesa4790db2014-02-26 20:21:47 -03005417 struct hci_request req;
5418 struct hci_conn *conn;
5419 int err;
5420
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005421 if (!test_bit(HCI_UP, &hdev->flags) ||
5422 test_bit(HCI_INIT, &hdev->flags) ||
5423 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005424 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005425 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005426 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005427 return;
5428
Johan Hedberga70f4b52014-07-07 15:19:50 +03005429 /* No point in doing scanning if LE support hasn't been enabled */
5430 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5431 return;
5432
Johan Hedbergae23ada2014-07-07 13:24:59 +03005433 /* If discovery is active don't interfere with it */
5434 if (hdev->discovery.state != DISCOVERY_STOPPED)
5435 return;
5436
Andre Guedesa4790db2014-02-26 20:21:47 -03005437 hci_req_init(&req, hdev);
5438
Johan Hedberg2b7be332014-07-07 14:40:22 +03005439 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5440 list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005441 list_empty(&hdev->pend_le_reports)) {
 /* If there are no pending LE connections or devices
5443 * to be scanned for, we should stop the background
5444 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005445 */
5446
5447 /* If controller is not scanning we are done. */
5448 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5449 return;
5450
5451 hci_req_add_le_scan_disable(&req);
5452
5453 BT_DBG("%s stopping background scanning", hdev->name);
5454 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005455 /* If there is at least one pending LE connection, we should
5456 * keep the background scan running.
5457 */
5458
Andre Guedesa4790db2014-02-26 20:21:47 -03005459 /* If controller is connecting, we should not start scanning
5460 * since some controllers are not able to scan and connect at
5461 * the same time.
5462 */
5463 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5464 if (conn)
5465 return;
5466
Andre Guedes4340a122014-03-10 18:26:24 -03005467 /* If controller is currently scanning, we stop it to ensure we
5468 * don't miss any advertising (due to duplicates filter).
5469 */
5470 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5471 hci_req_add_le_scan_disable(&req);
5472
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005473 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005474
5475 BT_DBG("%s starting background scanning", hdev->name);
5476 }
5477
5478 err = hci_req_run(&req, update_background_scan_complete);
5479 if (err)
5480 BT_ERR("Failed to run HCI request: err %d", err);
5481}