/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

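/* The dut_mode attribute exposes the HCI_DUT_MODE flag. Reading it
 * reports Y/N; writing a boolean toggles Device Under Test mode by
 * sending HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET when leaving it)
 * to the controller.
 */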
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

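/* features_show dumps every supported LMP feature page (and the LE
 * feature page on LE capable controllers) as hex, one page per line.
 */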
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

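/* inquiry_cache_show prints one line per cached inquiry result:
 * address, page scan parameters, device class, clock offset, RSSI,
 * SSP mode and the entry timestamp.
 */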
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

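/* voice_setting is read-only; it reports the controller's current
 * voice setting as a hex value.
 */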
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

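/* idle_timeout is in milliseconds; 0 disables the timer, otherwise
 * the value must stay within 500 ms to 3600000 ms (one hour).
 */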
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

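/* identity_show reports the identity address the controller operates
 * with: the static random address when one is forced or no public
 * address exists, otherwise the public address, plus the local IRK.
 */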
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN\n", addr, addr_type, 16, hdev->irk);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

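/* identity_resolving_keys dumps every stored IRK: the identity
 * address and its type, the 16-byte key value and the last known RPA.
 */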
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

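/* Completion callback for synchronous requests: record the result and
 * wake up whoever is sleeping on req_wait_q in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */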
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

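/* Send a single HCI command and sleep until the matching completion
 * arrives: a specific event if @event is non-zero, otherwise a Command
 * Complete for @opcode. Callers serialize access via hci_req_lock().
 * Returns the event skb on success or an ERR_PTR() on failure.
 */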
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

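/* The init stages below are request builders: each one only queues
 * HCI commands on a struct hci_request, and __hci_init() drives them
 * through __hci_req_sync() one stage at a time.
 */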
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

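/* Pick the best inquiry mode the controller can do: 0x02 (extended),
 * 0x01 (with RSSI) or 0x00 (standard). A few controllers that
 * misreport their capabilities are special-cased by manufacturer,
 * HCI revision and LMP subversion.
 */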
1260static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1261{
1262 if (lmp_ext_inq_capable(hdev))
1263 return 0x02;
1264
1265 if (lmp_inq_rssi_capable(hdev))
1266 return 0x01;
1267
1268 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1269 hdev->lmp_subver == 0x0757)
1270 return 0x01;
1271
1272 if (hdev->manufacturer == 15) {
1273 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1274 return 0x01;
1275 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1276 return 0x01;
1277 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1278 return 0x01;
1279 }
1280
1281 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1282 hdev->lmp_subver == 0x1805)
1283 return 0x01;
1284
1285 return 0x00;
1286}
1287
Johan Hedberg42c6b122013-03-05 20:37:49 +02001288static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001289{
1290 u8 mode;
1291
Johan Hedberg42c6b122013-03-05 20:37:49 +02001292 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001293
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001295}
1296
Johan Hedberg42c6b122013-03-05 20:37:49 +02001297static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001298{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001299 struct hci_dev *hdev = req->hdev;
1300
Johan Hedberg2177bab2013-03-05 20:37:43 +02001301 /* The second byte is 0xff instead of 0x9f (two reserved bits
1302 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1303 * command otherwise.
1304 */
1305 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1306
1307 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1308 * any event mask for pre 1.2 devices.
1309 */
1310 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1311 return;
1312
1313 if (lmp_bredr_capable(hdev)) {
1314 events[4] |= 0x01; /* Flow Specification Complete */
1315 events[4] |= 0x02; /* Inquiry Result with RSSI */
1316 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1317 events[5] |= 0x08; /* Synchronous Connection Complete */
1318 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001319 } else {
1320 /* Use a different default for LE-only devices */
1321 memset(events, 0, sizeof(events));
1322 events[0] |= 0x10; /* Disconnection Complete */
1323 events[0] |= 0x80; /* Encryption Change */
1324 events[1] |= 0x08; /* Read Remote Version Information Complete */
1325 events[1] |= 0x20; /* Command Complete */
1326 events[1] |= 0x40; /* Command Status */
1327 events[1] |= 0x80; /* Hardware Error */
1328 events[2] |= 0x04; /* Number of Completed Packets */
1329 events[3] |= 0x02; /* Data Buffer Overflow */
1330 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001331 }
1332
1333 if (lmp_inq_rssi_capable(hdev))
1334 events[4] |= 0x02; /* Inquiry Result with RSSI */
1335
1336 if (lmp_sniffsubr_capable(hdev))
1337 events[5] |= 0x20; /* Sniff Subrating */
1338
1339 if (lmp_pause_enc_capable(hdev))
1340 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1341
1342 if (lmp_ext_inq_capable(hdev))
1343 events[5] |= 0x40; /* Extended Inquiry Result */
1344
1345 if (lmp_no_flush_capable(hdev))
1346 events[7] |= 0x01; /* Enhanced Flush Complete */
1347
1348 if (lmp_lsto_capable(hdev))
1349 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1350
1351 if (lmp_ssp_capable(hdev)) {
1352 events[6] |= 0x01; /* IO Capability Request */
1353 events[6] |= 0x02; /* IO Capability Response */
1354 events[6] |= 0x04; /* User Confirmation Request */
1355 events[6] |= 0x08; /* User Passkey Request */
1356 events[6] |= 0x10; /* Remote OOB Data Request */
1357 events[6] |= 0x20; /* Simple Pairing Complete */
1358 events[7] |= 0x04; /* User Passkey Notification */
1359 events[7] |= 0x08; /* Keypress Notification */
1360 events[7] |= 0x10; /* Remote Host Supported
1361 * Features Notification
1362 */
1363 }
1364
1365 if (lmp_le_capable(hdev))
1366 events[7] |= 0x20; /* LE Meta-Event */
1367
Johan Hedberg42c6b122013-03-05 20:37:49 +02001368 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001369
1370 if (lmp_le_capable(hdev)) {
1371 memset(events, 0, sizeof(events));
1372 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001373 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1374 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001375 }
1376}
1377
Johan Hedberg42c6b122013-03-05 20:37:49 +02001378static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001379{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001380 struct hci_dev *hdev = req->hdev;
1381
Johan Hedberg2177bab2013-03-05 20:37:43 +02001382 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001383 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001384 else
1385 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001386
1387 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001388 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001389
Johan Hedberg42c6b122013-03-05 20:37:49 +02001390 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001391
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001392 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1393 * local supported commands HCI command.
1394 */
1395 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001396 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001397
1398 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001399 /* When SSP is available, then the host features page
1400 * should also be available as well. However some
1401 * controllers list the max_page as 0 as long as SSP
1402 * has not been enabled. To achieve proper debugging
1403 * output, force the minimum max_page to 1 at least.
1404 */
1405 hdev->max_page = 0x01;
1406
Johan Hedberg2177bab2013-03-05 20:37:43 +02001407 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1408 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001409 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1410 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001411 } else {
1412 struct hci_cp_write_eir cp;
1413
1414 memset(hdev->eir, 0, sizeof(hdev->eir));
1415 memset(&cp, 0, sizeof(cp));
1416
Johan Hedberg42c6b122013-03-05 20:37:49 +02001417 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001418 }
1419 }
1420
1421 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001422 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001423
1424 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001425 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001426
1427 if (lmp_ext_feat_capable(hdev)) {
1428 struct hci_cp_read_local_ext_features cp;
1429
1430 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001431 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1432 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001433 }
1434
1435 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1436 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001437 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1438 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001439 }
1440}
1441
Johan Hedberg42c6b122013-03-05 20:37:49 +02001442static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001443{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001444 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001445 struct hci_cp_write_def_link_policy cp;
1446 u16 link_policy = 0;
1447
1448 if (lmp_rswitch_capable(hdev))
1449 link_policy |= HCI_LP_RSWITCH;
1450 if (lmp_hold_capable(hdev))
1451 link_policy |= HCI_LP_HOLD;
1452 if (lmp_sniff_capable(hdev))
1453 link_policy |= HCI_LP_SNIFF;
1454 if (lmp_park_capable(hdev))
1455 link_policy |= HCI_LP_PARK;
1456
1457 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001458 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001459}
1460
Johan Hedberg42c6b122013-03-05 20:37:49 +02001461static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001462{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001463 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001464 struct hci_cp_write_le_host_supported cp;
1465
Johan Hedbergc73eee92013-04-19 18:35:21 +03001466 /* LE-only devices do not support explicit enablement */
1467 if (!lmp_bredr_capable(hdev))
1468 return;
1469
Johan Hedberg2177bab2013-03-05 20:37:43 +02001470 memset(&cp, 0, sizeof(cp));
1471
1472 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1473 cp.le = 0x01;
1474 cp.simul = lmp_le_br_capable(hdev);
1475 }
1476
1477 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001478 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1479 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001480}
1481
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001482static void hci_set_event_mask_page_2(struct hci_request *req)
1483{
1484 struct hci_dev *hdev = req->hdev;
1485 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1486
1487 /* If Connectionless Slave Broadcast master role is supported
1488 * enable all necessary events for it.
1489 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001490 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001491 events[1] |= 0x40; /* Triggered Clock Capture */
1492 events[1] |= 0x80; /* Synchronization Train Complete */
1493 events[2] |= 0x10; /* Slave Page Response Timeout */
1494 events[2] |= 0x20; /* CSB Channel Map Change */
1495 }
1496
1497 /* If Connectionless Slave Broadcast slave role is supported
1498 * enable all necessary events for it.
1499 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001500 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001501 events[2] |= 0x01; /* Synchronization Train Received */
1502 events[2] |= 0x02; /* CSB Receive */
1503 events[2] |= 0x04; /* CSB Timeout */
1504 events[2] |= 0x08; /* Truncated Page Complete */
1505 }
1506
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001507 /* Enable Authenticated Payload Timeout Expired event if supported */
1508 if (lmp_ping_capable(hdev))
1509 events[2] |= 0x80;
1510
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001511 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1512}
1513
Johan Hedberg42c6b122013-03-05 20:37:49 +02001514static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001517 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001518
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001519 /* Some Broadcom based Bluetooth controllers do not support the
1520 * Delete Stored Link Key command. They are clearly indicating its
1521 * absence in the bit mask of supported commands.
1522 *
1523 * Check the supported commands and only if the the command is marked
1524 * as supported send it. If not supported assume that the controller
1525 * does not have actual support for stored link keys which makes this
1526 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001527 *
1528 * Some controllers indicate that they support handling deleting
1529 * stored link keys, but they don't. The quirk lets a driver
1530 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001531 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001532 if (hdev->commands[6] & 0x80 &&
1533 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001534 struct hci_cp_delete_stored_link_key cp;
1535
1536 bacpy(&cp.bdaddr, BDADDR_ANY);
1537 cp.delete_all = 0x01;
1538 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1539 sizeof(cp), &cp);
1540 }
1541
Johan Hedberg2177bab2013-03-05 20:37:43 +02001542 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001543 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544
Johan Hedberg7bf32042014-02-23 19:42:29 +02001545 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001546 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001547
1548 /* Read features beyond page 1 if available */
1549 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1550 struct hci_cp_read_local_ext_features cp;
1551
1552 cp.page = p;
1553 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1554 sizeof(cp), &cp);
1555 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001556}
1557
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001558static void hci_init4_req(struct hci_request *req, unsigned long opt)
1559{
1560 struct hci_dev *hdev = req->hdev;
1561
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001562 /* Set event mask page 2 if the HCI command for it is supported */
1563 if (hdev->commands[22] & 0x04)
1564 hci_set_event_mask_page_2(req);
1565
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001566 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001567 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001568 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001569
1570 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001571 if ((lmp_sc_capable(hdev) ||
1572 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001573 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1574 u8 support = 0x01;
1575 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1576 sizeof(support), &support);
1577 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001578}
1579
Johan Hedberg2177bab2013-03-05 20:37:43 +02001580static int __hci_init(struct hci_dev *hdev)
1581{
1582 int err;
1583
1584 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1585 if (err < 0)
1586 return err;
1587
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001588 /* The Device Under Test (DUT) mode is special and available for
1589 * all controller types. So just create it early on.
1590 */
1591 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1592 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1593 &dut_mode_fops);
1594 }
1595
Johan Hedberg2177bab2013-03-05 20:37:43 +02001596	/* The HCI_BREDR device type covers single-mode LE, single-mode
 1597	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
 1598	 * only need the first stage init.
1599 */
1600 if (hdev->dev_type != HCI_BREDR)
1601 return 0;
1602
1603 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1604 if (err < 0)
1605 return err;
1606
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001607 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1608 if (err < 0)
1609 return err;
1610
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001611 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1612 if (err < 0)
1613 return err;
1614
1615 /* Only create debugfs entries during the initial setup
1616 * phase and not every time the controller gets powered on.
1617 */
1618 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1619 return 0;
1620
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001621 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1622 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001623 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1624 &hdev->manufacturer);
1625 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1626 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001627 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1628 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001629 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1630
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001631 if (lmp_bredr_capable(hdev)) {
1632 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1633 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001634 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1635 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001636 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1637 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001638 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1639 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001640 }
1641
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001642 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001643 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1644 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001645 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1646 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001647 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1648 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001649 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1650 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001651 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001652
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001653 if (lmp_sniff_capable(hdev)) {
1654 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1655 hdev, &idle_timeout_fops);
1656 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1657 hdev, &sniff_min_interval_fops);
1658 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1659 hdev, &sniff_max_interval_fops);
1660 }
1661
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001662 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001663 debugfs_create_file("identity", 0400, hdev->debugfs,
1664 hdev, &identity_fops);
1665 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1666 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001667 debugfs_create_file("random_address", 0444, hdev->debugfs,
1668 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001669 debugfs_create_file("static_address", 0444, hdev->debugfs,
1670 hdev, &static_address_fops);
1671
1672 /* For controllers with a public address, provide a debug
1673 * option to force the usage of the configured static
1674 * address. By default the public address is used.
1675 */
1676 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1677 debugfs_create_file("force_static_address", 0644,
1678 hdev->debugfs, hdev,
1679 &force_static_address_fops);
1680
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001681 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1682 &hdev->le_white_list_size);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001683 debugfs_create_file("identity_resolving_keys", 0400,
1684 hdev->debugfs, hdev,
1685 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001686 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1687 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001688 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1689 hdev, &conn_min_interval_fops);
1690 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1691 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001692 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1693 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001694 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1695 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001696 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001697
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001698 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001699}
1700
Johan Hedberg42c6b122013-03-05 20:37:49 +02001701static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702{
1703 __u8 scan = opt;
1704
Johan Hedberg42c6b122013-03-05 20:37:49 +02001705 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
1707 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001708 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709}
1710
Johan Hedberg42c6b122013-03-05 20:37:49 +02001711static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712{
1713 __u8 auth = opt;
1714
Johan Hedberg42c6b122013-03-05 20:37:49 +02001715 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716
1717 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001718 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719}
1720
Johan Hedberg42c6b122013-03-05 20:37:49 +02001721static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722{
1723 __u8 encrypt = opt;
1724
Johan Hedberg42c6b122013-03-05 20:37:49 +02001725 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001727 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001728 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729}
1730
Johan Hedberg42c6b122013-03-05 20:37:49 +02001731static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001732{
1733 __le16 policy = cpu_to_le16(opt);
1734
Johan Hedberg42c6b122013-03-05 20:37:49 +02001735 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001736
1737 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001738 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001739}
1740
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001741/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 * Device is held on return. */
1743struct hci_dev *hci_dev_get(int index)
1744{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001745 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
1747 BT_DBG("%d", index);
1748
1749 if (index < 0)
1750 return NULL;
1751
1752 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001753 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 if (d->id == index) {
1755 hdev = hci_dev_hold(d);
1756 break;
1757 }
1758 }
1759 read_unlock(&hci_dev_list_lock);
1760 return hdev;
1761}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
1763/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001764
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001765bool hci_discovery_active(struct hci_dev *hdev)
1766{
1767 struct discovery_state *discov = &hdev->discovery;
1768
Andre Guedes6fbe1952012-02-03 17:47:58 -03001769 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001770 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001771 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001772 return true;
1773
Andre Guedes6fbe1952012-02-03 17:47:58 -03001774 default:
1775 return false;
1776 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001777}
1778
Johan Hedbergff9ef572012-01-04 14:23:45 +02001779void hci_discovery_set_state(struct hci_dev *hdev, int state)
1780{
1781 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1782
1783 if (hdev->discovery.state == state)
1784 return;
1785
1786 switch (state) {
1787 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001788 if (hdev->discovery.state != DISCOVERY_STARTING)
1789 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001790 break;
1791 case DISCOVERY_STARTING:
1792 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001793 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001794 mgmt_discovering(hdev, 1);
1795 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001796 case DISCOVERY_RESOLVING:
1797 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001798 case DISCOVERY_STOPPING:
1799 break;
1800 }
1801
1802 hdev->discovery.state = state;
1803}
1804
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001805void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806{
Johan Hedberg30883512012-01-04 14:16:21 +02001807 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001808 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Johan Hedberg561aafb2012-01-04 13:31:59 +02001810 list_for_each_entry_safe(p, n, &cache->all, all) {
1811 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001812 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001814
1815 INIT_LIST_HEAD(&cache->unknown);
1816 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817}
1818
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001819struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1820 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
Johan Hedberg30883512012-01-04 14:16:21 +02001822 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 struct inquiry_entry *e;
1824
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001825 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Johan Hedberg561aafb2012-01-04 13:31:59 +02001827 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001829 return e;
1830 }
1831
1832 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833}
1834
Johan Hedberg561aafb2012-01-04 13:31:59 +02001835struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001836 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001837{
Johan Hedberg30883512012-01-04 14:16:21 +02001838 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001839 struct inquiry_entry *e;
1840
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001841 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001842
1843 list_for_each_entry(e, &cache->unknown, list) {
1844 if (!bacmp(&e->data.bdaddr, bdaddr))
1845 return e;
1846 }
1847
1848 return NULL;
1849}
1850
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001851struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001852 bdaddr_t *bdaddr,
1853 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001854{
1855 struct discovery_state *cache = &hdev->discovery;
1856 struct inquiry_entry *e;
1857
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001858 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001859
1860 list_for_each_entry(e, &cache->resolve, list) {
1861 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1862 return e;
1863 if (!bacmp(&e->data.bdaddr, bdaddr))
1864 return e;
1865 }
1866
1867 return NULL;
1868}
1869
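/* Re-insert a cache entry into the resolve list so that the list stays
 * ordered by ascending abs(RSSI): name resolution is then attempted for
 * the strongest (closest) devices first. Entries whose resolution is
 * already in flight (NAME_PENDING) never advance the insertion point.
 */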
Johan Hedberga3d4e202012-01-09 00:53:02 +02001870void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001871 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001872{
1873 struct discovery_state *cache = &hdev->discovery;
1874 struct list_head *pos = &cache->resolve;
1875 struct inquiry_entry *p;
1876
1877 list_del(&ie->list);
1878
1879 list_for_each_entry(p, &cache->resolve, list) {
1880 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001881 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001882 break;
1883 pos = &p->list;
1884 }
1885
1886 list_add(&ie->list, pos);
1887}
1888
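/* Add or refresh an entry in the inquiry cache. Returns false when the
 * updated entry still has no name at all (NAME_NOT_KNOWN, or the
 * allocation failed), true otherwise.
 */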
Johan Hedberg31754052012-01-04 13:39:52 +02001889bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001890 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891{
Johan Hedberg30883512012-01-04 14:16:21 +02001892 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001893 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001895 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
Szymon Janc2b2fec42012-11-20 11:38:54 +01001897 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1898
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001899 if (ssp)
1900 *ssp = data->ssp_mode;
1901
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001902 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001903 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001904 if (ie->data.ssp_mode && ssp)
1905 *ssp = true;
1906
Johan Hedberga3d4e202012-01-09 00:53:02 +02001907 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001908 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001909 ie->data.rssi = data->rssi;
1910 hci_inquiry_cache_update_resolve(hdev, ie);
1911 }
1912
Johan Hedberg561aafb2012-01-04 13:31:59 +02001913 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001914 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001915
Johan Hedberg561aafb2012-01-04 13:31:59 +02001916 /* Entry not in the cache. Add new one. */
1917 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1918 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001919 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001920
1921 list_add(&ie->all, &cache->all);
1922
1923 if (name_known) {
1924 ie->name_state = NAME_KNOWN;
1925 } else {
1926 ie->name_state = NAME_NOT_KNOWN;
1927 list_add(&ie->list, &cache->unknown);
1928 }
1929
1930update:
1931 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001932 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001933 ie->name_state = NAME_KNOWN;
1934 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 }
1936
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001937 memcpy(&ie->data, data, sizeof(*data));
1938 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001940
1941 if (ie->name_state == NAME_NOT_KNOWN)
1942 return false;
1943
1944 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945}
1946
1947static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1948{
Johan Hedberg30883512012-01-04 14:16:21 +02001949 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 struct inquiry_info *info = (struct inquiry_info *) buf;
1951 struct inquiry_entry *e;
1952 int copied = 0;
1953
Johan Hedberg561aafb2012-01-04 13:31:59 +02001954 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001956
1957 if (copied >= num)
1958 break;
1959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 bacpy(&info->bdaddr, &data->bdaddr);
1961 info->pscan_rep_mode = data->pscan_rep_mode;
1962 info->pscan_period_mode = data->pscan_period_mode;
1963 info->pscan_mode = data->pscan_mode;
1964 memcpy(info->dev_class, data->dev_class, 3);
1965 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001968 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 }
1970
1971 BT_DBG("cache %p, copied %d", cache, copied);
1972 return copied;
1973}
1974
Johan Hedberg42c6b122013-03-05 20:37:49 +02001975static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976{
1977 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001978 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 struct hci_cp_inquiry cp;
1980
1981 BT_DBG("%s", hdev->name);
1982
1983 if (test_bit(HCI_INQUIRY, &hdev->flags))
1984 return;
1985
1986 /* Start Inquiry */
1987 memcpy(&cp.lap, &ir->lap, 3);
1988 cp.length = ir->length;
1989 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001990 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991}
1992
Andre Guedes3e13fa12013-03-27 20:04:56 -03001993static int wait_inquiry(void *word)
1994{
1995 schedule();
1996 return signal_pending(current);
1997}
1998
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999int hci_inquiry(void __user *arg)
2000{
2001 __u8 __user *ptr = arg;
2002 struct hci_inquiry_req ir;
2003 struct hci_dev *hdev;
2004 int err = 0, do_inquiry = 0, max_rsp;
2005 long timeo;
2006 __u8 *buf;
2007
2008 if (copy_from_user(&ir, ptr, sizeof(ir)))
2009 return -EFAULT;
2010
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002011 hdev = hci_dev_get(ir.dev_id);
2012 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 return -ENODEV;
2014
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002015 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2016 err = -EBUSY;
2017 goto done;
2018 }
2019
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002020 if (hdev->dev_type != HCI_BREDR) {
2021 err = -EOPNOTSUPP;
2022 goto done;
2023 }
2024
Johan Hedberg56f87902013-10-02 13:43:13 +03002025 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2026 err = -EOPNOTSUPP;
2027 goto done;
2028 }
2029
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002030 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002031 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002032 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002033 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 do_inquiry = 1;
2035 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002036 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
Marcel Holtmann04837f62006-07-03 10:02:33 +02002038 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002039
2040 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002041 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2042 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002043 if (err < 0)
2044 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002045
2046 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2047 * cleared). If it is interrupted by a signal, return -EINTR.
2048 */
 2049		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
 2050				TASK_INTERRUPTIBLE)) {
 2051			err = -EINTR;
 2052			goto done;
 2053		}
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002052 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002054	/* For an unlimited number of responses we use a buffer with
 2055	 * 255 entries
2056 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2058
 2059	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
 2060	 * copy it to user space.
2061 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002062 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002063 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 err = -ENOMEM;
2065 goto done;
2066 }
2067
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002068 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002070 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
2072 BT_DBG("num_rsp %d", ir.num_rsp);
2073
2074 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2075 ptr += sizeof(ir);
2076 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002077 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002079 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 err = -EFAULT;
2081
2082 kfree(buf);
2083
2084done:
2085 hci_dev_put(hdev);
2086 return err;
2087}
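
/* For illustration only (not part of the kernel source): a minimal
 * userspace sketch of driving the HCIINQUIRY ioctl handled above. It
 * assumes a raw HCI socket descriptor dd and relies on the UAPI
 * definitions of struct hci_inquiry_req, struct inquiry_info and
 * IREQ_CACHE_FLUSH; the LAP 0x9e8b33 is the General Inquiry Access
 * Code. The buffer layout matches what the handler expects: the
 * request header immediately followed by the response array.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;			// hci0
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;	// drop the stale cache first
 *	buf.ir.length  = 4;			// 4 * 1.28s inquiry window
 *	buf.ir.num_rsp = 8;			// cap at 8 responses
 *	buf.ir.lap[0]  = 0x33;			// GIAC 0x9e8b33, little endian
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) &buf) < 0)
 *		perror("HCIINQUIRY");
 */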
2088
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002089static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 int ret = 0;
2092
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 BT_DBG("%s %p", hdev->name, hdev);
2094
2095 hci_req_lock(hdev);
2096
Johan Hovold94324962012-03-15 14:48:41 +01002097 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2098 ret = -ENODEV;
2099 goto done;
2100 }
2101
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002102 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2103 /* Check for rfkill but allow the HCI setup stage to
2104 * proceed (which in itself doesn't cause any RF activity).
2105 */
2106 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2107 ret = -ERFKILL;
2108 goto done;
2109 }
2110
2111 /* Check for valid public address or a configured static
 2112		 * random address, but let the HCI setup proceed to
2113 * be able to determine if there is a public address
2114 * or not.
2115 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002116 * In case of user channel usage, it is not important
2117 * if a public address or static random address is
2118 * available.
2119 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002120 * This check is only valid for BR/EDR controllers
2121 * since AMP controllers do not have an address.
2122 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002123 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2124 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002125 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2126 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2127 ret = -EADDRNOTAVAIL;
2128 goto done;
2129 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002130 }
2131
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 if (test_bit(HCI_UP, &hdev->flags)) {
2133 ret = -EALREADY;
2134 goto done;
2135 }
2136
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 if (hdev->open(hdev)) {
2138 ret = -EIO;
2139 goto done;
2140 }
2141
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002142 atomic_set(&hdev->cmd_cnt, 1);
2143 set_bit(HCI_INIT, &hdev->flags);
2144
2145 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2146 ret = hdev->setup(hdev);
2147
2148 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002149 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2150 set_bit(HCI_RAW, &hdev->flags);
2151
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002152 if (!test_bit(HCI_RAW, &hdev->flags) &&
2153 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002154 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002157 clear_bit(HCI_INIT, &hdev->flags);
2158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 if (!ret) {
2160 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002161 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 set_bit(HCI_UP, &hdev->flags);
2163 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002164 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002165 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002166 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002167 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002168 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002169 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002170 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002171 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002173 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002174 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002175 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
2177 skb_queue_purge(&hdev->cmd_q);
2178 skb_queue_purge(&hdev->rx_q);
2179
2180 if (hdev->flush)
2181 hdev->flush(hdev);
2182
2183 if (hdev->sent_cmd) {
2184 kfree_skb(hdev->sent_cmd);
2185 hdev->sent_cmd = NULL;
2186 }
2187
2188 hdev->close(hdev);
2189 hdev->flags = 0;
2190 }
2191
2192done:
2193 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 return ret;
2195}
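
/* Summing up the flow above: hci_dev_do_open() serializes on the
 * request lock, rejects rfkill-blocked controllers (unless still in
 * setup) and BR/EDR controllers without any usable address (unless
 * used as a user channel), calls the driver's open() and optional
 * setup() callbacks, runs the staged __hci_init() sequence and on
 * success raises HCI_UP; any init failure flushes the work queues
 * and closes the driver again.
 */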
2196
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002197/* ---- HCI ioctl helpers ---- */
2198
2199int hci_dev_open(__u16 dev)
2200{
2201 struct hci_dev *hdev;
2202 int err;
2203
2204 hdev = hci_dev_get(dev);
2205 if (!hdev)
2206 return -ENODEV;
2207
Johan Hedberge1d08f42013-10-01 22:44:50 +03002208 /* We need to ensure that no other power on/off work is pending
2209 * before proceeding to call hci_dev_do_open. This is
2210 * particularly important if the setup procedure has not yet
2211 * completed.
2212 */
2213 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2214 cancel_delayed_work(&hdev->power_off);
2215
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002216 /* After this call it is guaranteed that the setup procedure
2217 * has finished. This means that error conditions like RFKILL
2218 * or no valid public or static random address apply.
2219 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002220 flush_workqueue(hdev->req_workqueue);
2221
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002222 err = hci_dev_do_open(hdev);
2223
2224 hci_dev_put(hdev);
2225
2226 return err;
2227}
2228
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229static int hci_dev_do_close(struct hci_dev *hdev)
2230{
2231 BT_DBG("%s %p", hdev->name, hdev);
2232
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002233 cancel_delayed_work(&hdev->power_off);
2234
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 hci_req_cancel(hdev, ENODEV);
2236 hci_req_lock(hdev);
2237
2238 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002239 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 hci_req_unlock(hdev);
2241 return 0;
2242 }
2243
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002244 /* Flush RX and TX works */
2245 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002246 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002248 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002249 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002250 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002251 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002252 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002253 }
2254
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002255 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002256 cancel_delayed_work(&hdev->service_cache);
2257
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002258 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002259 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002260
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002261 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002262 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002264 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
2266 hci_notify(hdev, HCI_DEV_DOWN);
2267
2268 if (hdev->flush)
2269 hdev->flush(hdev);
2270
2271 /* Reset device */
2272 skb_queue_purge(&hdev->cmd_q);
2273 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002274 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002275 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002276 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002278 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 clear_bit(HCI_INIT, &hdev->flags);
2280 }
2281
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002282 /* flush cmd work */
2283 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284
2285 /* Drop queues */
2286 skb_queue_purge(&hdev->rx_q);
2287 skb_queue_purge(&hdev->cmd_q);
2288 skb_queue_purge(&hdev->raw_q);
2289
2290 /* Drop last sent command */
2291 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002292 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 kfree_skb(hdev->sent_cmd);
2294 hdev->sent_cmd = NULL;
2295 }
2296
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002297 kfree_skb(hdev->recv_evt);
2298 hdev->recv_evt = NULL;
2299
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 /* After this point our queues are empty
2301 * and no tasks are scheduled. */
2302 hdev->close(hdev);
2303
Johan Hedberg35b973c2013-03-15 17:06:59 -05002304 /* Clear flags */
2305 hdev->flags = 0;
2306 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2307
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002308 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2309 if (hdev->dev_type == HCI_BREDR) {
2310 hci_dev_lock(hdev);
2311 mgmt_powered(hdev, 0);
2312 hci_dev_unlock(hdev);
2313 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002314 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002315
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002316 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002317 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002318
Johan Hedberge59fda82012-02-22 18:11:53 +02002319 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002320 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002321 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002322
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 hci_req_unlock(hdev);
2324
2325 hci_dev_put(hdev);
2326 return 0;
2327}
2328
2329int hci_dev_close(__u16 dev)
2330{
2331 struct hci_dev *hdev;
2332 int err;
2333
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002334 hdev = hci_dev_get(dev);
2335 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002337
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002338 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2339 err = -EBUSY;
2340 goto done;
2341 }
2342
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002343 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2344 cancel_delayed_work(&hdev->power_off);
2345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002347
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002348done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 hci_dev_put(hdev);
2350 return err;
2351}
2352
2353int hci_dev_reset(__u16 dev)
2354{
2355 struct hci_dev *hdev;
2356 int ret = 0;
2357
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002358 hdev = hci_dev_get(dev);
2359 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 return -ENODEV;
2361
2362 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
Marcel Holtmann808a0492013-08-26 20:57:58 -07002364 if (!test_bit(HCI_UP, &hdev->flags)) {
2365 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002367 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002369 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2370 ret = -EBUSY;
2371 goto done;
2372 }
2373
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 /* Drop queues */
2375 skb_queue_purge(&hdev->rx_q);
2376 skb_queue_purge(&hdev->cmd_q);
2377
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002378 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002379 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002381 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
2383 if (hdev->flush)
2384 hdev->flush(hdev);
2385
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002386 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002387 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
2389 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002390 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
2392done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 hci_req_unlock(hdev);
2394 hci_dev_put(hdev);
2395 return ret;
2396}
2397
2398int hci_dev_reset_stat(__u16 dev)
2399{
2400 struct hci_dev *hdev;
2401 int ret = 0;
2402
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002403 hdev = hci_dev_get(dev);
2404 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 return -ENODEV;
2406
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002407 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2408 ret = -EBUSY;
2409 goto done;
2410 }
2411
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2413
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002414done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 return ret;
2417}
2418
2419int hci_dev_cmd(unsigned int cmd, void __user *arg)
2420{
2421 struct hci_dev *hdev;
2422 struct hci_dev_req dr;
2423 int err = 0;
2424
2425 if (copy_from_user(&dr, arg, sizeof(dr)))
2426 return -EFAULT;
2427
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002428 hdev = hci_dev_get(dr.dev_id);
2429 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 return -ENODEV;
2431
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002432 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2433 err = -EBUSY;
2434 goto done;
2435 }
2436
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002437 if (hdev->dev_type != HCI_BREDR) {
2438 err = -EOPNOTSUPP;
2439 goto done;
2440 }
2441
Johan Hedberg56f87902013-10-02 13:43:13 +03002442 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2443 err = -EOPNOTSUPP;
2444 goto done;
2445 }
2446
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 switch (cmd) {
2448 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002449 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2450 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 break;
2452
2453 case HCISETENCRYPT:
2454 if (!lmp_encrypt_capable(hdev)) {
2455 err = -EOPNOTSUPP;
2456 break;
2457 }
2458
2459 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2460 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002461 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2462 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 if (err)
2464 break;
2465 }
2466
Johan Hedberg01178cd2013-03-05 20:37:41 +02002467 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2468 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 break;
2470
2471 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002472 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2473 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 break;
2475
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002476 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002477 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2478 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002479 break;
2480
2481 case HCISETLINKMODE:
2482 hdev->link_mode = ((__u16) dr.dev_opt) &
2483 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2484 break;
2485
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 case HCISETPTYPE:
2487 hdev->pkt_type = (__u16) dr.dev_opt;
2488 break;
2489
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002491 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2492 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 break;
2494
2495 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002496 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2497 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 break;
2499
2500 default:
2501 err = -EINVAL;
2502 break;
2503 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002504
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002505done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 hci_dev_put(hdev);
2507 return err;
2508}
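
/* For illustration only (not part of the kernel source): enabling
 * inquiry and page scan through the HCISETSCAN case handled above,
 * assuming a raw HCI socket descriptor dd and the UAPI SCAN_*
 * scan-enable constants.
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_INQUIRY | SCAN_PAGE,
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */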
2509
2510int hci_get_dev_list(void __user *arg)
2511{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002512 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 struct hci_dev_list_req *dl;
2514 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 int n = 0, size, err;
2516 __u16 dev_num;
2517
2518 if (get_user(dev_num, (__u16 __user *) arg))
2519 return -EFAULT;
2520
2521 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2522 return -EINVAL;
2523
2524 size = sizeof(*dl) + dev_num * sizeof(*dr);
2525
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002526 dl = kzalloc(size, GFP_KERNEL);
2527 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 return -ENOMEM;
2529
2530 dr = dl->dev_req;
2531
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002532 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002533 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002534 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002535 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002536
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002537 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2538 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002539
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 (dr + n)->dev_id = hdev->id;
2541 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002542
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 if (++n >= dev_num)
2544 break;
2545 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002546 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547
2548 dl->dev_num = n;
2549 size = sizeof(*dl) + n * sizeof(*dr);
2550
2551 err = copy_to_user(arg, dl, size);
2552 kfree(dl);
2553
2554 return err ? -EFAULT : 0;
2555}
2556
2557int hci_get_dev_info(void __user *arg)
2558{
2559 struct hci_dev *hdev;
2560 struct hci_dev_info di;
2561 int err = 0;
2562
2563 if (copy_from_user(&di, arg, sizeof(di)))
2564 return -EFAULT;
2565
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002566 hdev = hci_dev_get(di.dev_id);
2567 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 return -ENODEV;
2569
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002570 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002571 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002572
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002573 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2574 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002575
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 strcpy(di.name, hdev->name);
2577 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002578 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 di.flags = hdev->flags;
2580 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002581 if (lmp_bredr_capable(hdev)) {
2582 di.acl_mtu = hdev->acl_mtu;
2583 di.acl_pkts = hdev->acl_pkts;
2584 di.sco_mtu = hdev->sco_mtu;
2585 di.sco_pkts = hdev->sco_pkts;
2586 } else {
2587 di.acl_mtu = hdev->le_mtu;
2588 di.acl_pkts = hdev->le_pkts;
2589 di.sco_mtu = 0;
2590 di.sco_pkts = 0;
2591 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 di.link_policy = hdev->link_policy;
2593 di.link_mode = hdev->link_mode;
2594
2595 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2596 memcpy(&di.features, &hdev->features, sizeof(di.features));
2597
2598 if (copy_to_user(arg, &di, sizeof(di)))
2599 err = -EFAULT;
2600
2601 hci_dev_put(hdev);
2602
2603 return err;
2604}
2605
2606/* ---- Interface to HCI drivers ---- */
2607
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002608static int hci_rfkill_set_block(void *data, bool blocked)
2609{
2610 struct hci_dev *hdev = data;
2611
2612 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2613
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002614 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2615 return -EBUSY;
2616
Johan Hedberg5e130362013-09-13 08:58:17 +03002617 if (blocked) {
2618 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002619 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2620 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002621 } else {
2622 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002623 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002624
2625 return 0;
2626}
2627
2628static const struct rfkill_ops hci_rfkill_ops = {
2629 .set_block = hci_rfkill_set_block,
2630};
2631
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002632static void hci_power_on(struct work_struct *work)
2633{
2634 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002635 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002636
2637 BT_DBG("%s", hdev->name);
2638
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002639 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002640 if (err < 0) {
2641 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002642 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002643 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002644
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002645 /* During the HCI setup phase, a few error conditions are
2646 * ignored and they need to be checked now. If they are still
2647 * valid, it is important to turn the device back off.
2648 */
2649 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2650 (hdev->dev_type == HCI_BREDR &&
2651 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2652 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002653 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2654 hci_dev_do_close(hdev);
2655 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002656 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2657 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002658 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002659
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002660 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002661 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002662}
2663
2664static void hci_power_off(struct work_struct *work)
2665{
Johan Hedberg32435532011-11-07 22:16:04 +02002666 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002667 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002668
2669 BT_DBG("%s", hdev->name);
2670
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002671 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002672}
2673
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002674static void hci_discov_off(struct work_struct *work)
2675{
2676 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002677
2678 hdev = container_of(work, struct hci_dev, discov_off.work);
2679
2680 BT_DBG("%s", hdev->name);
2681
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002682 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002683}
2684
Johan Hedberg35f74982014-02-18 17:14:32 +02002685void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002686{
Johan Hedberg48210022013-01-27 00:31:28 +02002687 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002688
Johan Hedberg48210022013-01-27 00:31:28 +02002689 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2690 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002691 kfree(uuid);
2692 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002693}
2694
Johan Hedberg35f74982014-02-18 17:14:32 +02002695void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002696{
2697 struct list_head *p, *n;
2698
2699 list_for_each_safe(p, n, &hdev->link_keys) {
2700 struct link_key *key;
2701
2702 key = list_entry(p, struct link_key, list);
2703
2704 list_del(p);
2705 kfree(key);
2706 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002707}
2708
Johan Hedberg35f74982014-02-18 17:14:32 +02002709void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002710{
2711 struct smp_ltk *k, *tmp;
2712
2713 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2714 list_del(&k->list);
2715 kfree(k);
2716 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002717}
2718
Johan Hedberg970c4e42014-02-18 10:19:33 +02002719void hci_smp_irks_clear(struct hci_dev *hdev)
2720{
2721 struct smp_irk *k, *tmp;
2722
2723 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2724 list_del(&k->list);
2725 kfree(k);
2726 }
2727}
2728
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002729struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2730{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002731 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002732
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002733 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002734 if (bacmp(bdaddr, &k->bdaddr) == 0)
2735 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002736
2737 return NULL;
2738}
2739
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302740static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002741 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002742{
2743 /* Legacy key */
2744 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302745 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002746
2747 /* Debug keys are insecure so don't store them persistently */
2748 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302749 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002750
2751 /* Changed combination key and there's no previous one */
2752 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302753 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002754
2755 /* Security mode 3 case */
2756 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302757 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002758
 2759	/* Neither local nor remote side had no-bonding as a requirement */
2760 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302761 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002762
2763 /* Local side had dedicated bonding as requirement */
2764 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302765 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002766
2767 /* Remote side had dedicated bonding as requirement */
2768 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302769 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002770
2771 /* If none of the above criteria match, then don't store the key
2772 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302773 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002774}
2775
Johan Hedberg98a0b842014-01-30 19:40:00 -08002776static bool ltk_type_master(u8 type)
2777{
2778 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2779 return true;
2780
2781 return false;
2782}
2783
2784struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2785 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002786{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002787 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002788
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002789 list_for_each_entry(k, &hdev->long_term_keys, list) {
2790 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002791 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002792 continue;
2793
Johan Hedberg98a0b842014-01-30 19:40:00 -08002794 if (ltk_type_master(k->type) != master)
2795 continue;
2796
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002797 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002798 }
2799
2800 return NULL;
2801}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002802
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002803struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002804 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002805{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002806 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002807
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002808 list_for_each_entry(k, &hdev->long_term_keys, list)
2809 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002810 bacmp(bdaddr, &k->bdaddr) == 0 &&
2811 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002812 return k;
2813
2814 return NULL;
2815}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002816
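/* Resolve a Resolvable Private Address to its IRK: first look for a
 * key whose cached rpa already matches, then fall back to recomputing
 * the address hash with smp_irk_matches() and cache the RPA on a hit.
 */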
Johan Hedberg970c4e42014-02-18 10:19:33 +02002817struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2818{
2819 struct smp_irk *irk;
2820
2821 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2822 if (!bacmp(&irk->rpa, rpa))
2823 return irk;
2824 }
2825
2826 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2827 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2828 bacpy(&irk->rpa, rpa);
2829 return irk;
2830 }
2831 }
2832
2833 return NULL;
2834}
2835
2836struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2837 u8 addr_type)
2838{
2839 struct smp_irk *irk;
2840
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002841 /* Identity Address must be public or static random */
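	/* A static random address has the two most significant bits of
	 * the address set to 0b11 (hence the 0xc0 mask below);
	 * resolvable (0b01) and non-resolvable (0b00) private addresses
	 * do not qualify as identity addresses.
	 */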
2842 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2843 return NULL;
2844
Johan Hedberg970c4e42014-02-18 10:19:33 +02002845 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2846 if (addr_type == irk->addr_type &&
2847 bacmp(bdaddr, &irk->bdaddr) == 0)
2848 return irk;
2849 }
2850
2851 return NULL;
2852}
2853
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002854int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002855 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002856{
2857 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302858 u8 old_key_type;
2859 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002860
2861 old_key = hci_find_link_key(hdev, bdaddr);
2862 if (old_key) {
2863 old_key_type = old_key->type;
2864 key = old_key;
2865 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002866 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002867 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002868 if (!key)
2869 return -ENOMEM;
2870 list_add(&key->list, &hdev->link_keys);
2871 }
2872
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002873 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002874
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002875 /* Some buggy controller combinations generate a changed
2876 * combination key for legacy pairing even when there's no
2877 * previous key */
2878 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002879 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002880 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002881 if (conn)
2882 conn->key_type = type;
2883 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002884
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002885 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002886 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002887 key->pin_len = pin_len;
2888
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002889 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002890 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002891 else
2892 key->type = type;
2893
Johan Hedberg4df378a2011-04-28 11:29:03 -07002894 if (!new_key)
2895 return 0;
2896
2897 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2898
Johan Hedberg744cf192011-11-08 20:40:14 +02002899 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002900
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302901 if (conn)
2902 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002903
2904 return 0;
2905}
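/* Illustrative sketch (an assumption, not taken from this file): the
 * typical caller is the Link Key Notification event handler, roughly
 *
 *	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, conn->pin_length);
 *
 * with new_key set so that mgmt_new_link_key() notifies userspace.
 */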
2906
Johan Hedbergca9142b2014-02-19 14:57:44 +02002907struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002908 u8 addr_type, u8 type, u8 authenticated,
2909 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002910{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002911 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002912 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002913
Johan Hedberg98a0b842014-01-30 19:40:00 -08002914 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002915 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002916 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002917 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002918 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002919 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002920 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002921 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002922 }
2923
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002924 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002925 key->bdaddr_type = addr_type;
2926 memcpy(key->val, tk, sizeof(key->val));
2927 key->authenticated = authenticated;
2928 key->ediv = ediv;
2929 key->enc_size = enc_size;
2930 key->type = type;
2931 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002932
Johan Hedbergca9142b2014-02-19 14:57:44 +02002933 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002934}
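/* Illustrative sketch (assumption; tk, enc_size, ediv and rand are
 * hypothetical locals): the SMP layer persists a distributed LTK
 * along the lines of
 *
 *	key = hci_add_ltk(hdev, &conn->dst, conn->dst_type,
 *			  HCI_SMP_LTK, 1, tk, enc_size, ediv, rand);
 *	if (!key)
 *		return;
 */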
2935
Johan Hedbergca9142b2014-02-19 14:57:44 +02002936struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2937 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002938{
2939 struct smp_irk *irk;
2940
2941 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2942 if (!irk) {
2943 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2944 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002945 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002946
2947 bacpy(&irk->bdaddr, bdaddr);
2948 irk->addr_type = addr_type;
2949
2950 list_add(&irk->list, &hdev->identity_resolving_keys);
2951 }
2952
2953 memcpy(irk->val, val, 16);
2954 bacpy(&irk->rpa, rpa);
2955
Johan Hedbergca9142b2014-02-19 14:57:44 +02002956 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002957}
2958
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002959int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2960{
2961 struct link_key *key;
2962
2963 key = hci_find_link_key(hdev, bdaddr);
2964 if (!key)
2965 return -ENOENT;
2966
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002967 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002968
2969 list_del(&key->list);
2970 kfree(key);
2971
2972 return 0;
2973}
2974
Johan Hedberge0b2b272014-02-18 17:14:31 +02002975int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002976{
2977 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002978 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002979
2980 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002981 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002982 continue;
2983
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002984 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002985
2986 list_del(&k->list);
2987 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002988 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002989 }
2990
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002991 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002992}
2993
Johan Hedberga7ec7332014-02-18 17:14:35 +02002994void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2995{
2996 struct smp_irk *k, *tmp;
2997
Johan Hedberg668b7b12014-02-21 16:03:31 +02002998 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002999 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3000 continue;
3001
3002 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3003
3004 list_del(&k->list);
3005 kfree(k);
3006 }
3007}
3008
Ville Tervo6bd32322011-02-16 16:32:41 +02003009/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003010static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02003011{
3012 struct hci_dev *hdev = (void *) arg;
3013
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003014 if (hdev->sent_cmd) {
3015 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3016 u16 opcode = __le16_to_cpu(sent->opcode);
3017
3018 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3019 } else {
3020 BT_ERR("%s command tx timeout", hdev->name);
3021 }
3022
Ville Tervo6bd32322011-02-16 16:32:41 +02003023 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003024 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003025}
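/* Illustrative note (assumption based on the command work flow): the
 * timer above is armed whenever a command is handed to the driver,
 * e.g.
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * and deleted again when the matching command complete or command
 * status event arrives.
 */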
3026
Szymon Janc2763eda2011-03-22 13:12:22 +01003027struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003028 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003029{
3030 struct oob_data *data;
3031
3032 list_for_each_entry(data, &hdev->remote_oob_data, list)
3033 if (bacmp(bdaddr, &data->bdaddr) == 0)
3034 return data;
3035
3036 return NULL;
3037}
3038
3039int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3040{
3041 struct oob_data *data;
3042
3043 data = hci_find_remote_oob_data(hdev, bdaddr);
3044 if (!data)
3045 return -ENOENT;
3046
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003047 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003048
3049 list_del(&data->list);
3050 kfree(data);
3051
3052 return 0;
3053}
3054
Johan Hedberg35f74982014-02-18 17:14:32 +02003055void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003056{
3057 struct oob_data *data, *n;
3058
3059 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3060 list_del(&data->list);
3061 kfree(data);
3062 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003063}
3064
Marcel Holtmann07988722014-01-10 02:07:29 -08003065int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3066 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003067{
3068 struct oob_data *data;
3069
3070 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003071 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003072 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003073 if (!data)
3074 return -ENOMEM;
3075
3076 bacpy(&data->bdaddr, bdaddr);
3077 list_add(&data->list, &hdev->remote_oob_data);
3078 }
3079
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003080 memcpy(data->hash192, hash, sizeof(data->hash192));
3081 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003082
Marcel Holtmann07988722014-01-10 02:07:29 -08003083 memset(data->hash256, 0, sizeof(data->hash256));
3084 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3085
3086 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3087
3088 return 0;
3089}
3090
3091int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3092 u8 *hash192, u8 *randomizer192,
3093 u8 *hash256, u8 *randomizer256)
3094{
3095 struct oob_data *data;
3096
3097 data = hci_find_remote_oob_data(hdev, bdaddr);
3098 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003099 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003100 if (!data)
3101 return -ENOMEM;
3102
3103 bacpy(&data->bdaddr, bdaddr);
3104 list_add(&data->list, &hdev->remote_oob_data);
3105 }
3106
3107 memcpy(data->hash192, hash192, sizeof(data->hash192));
3108 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3109
3110 memcpy(data->hash256, hash256, sizeof(data->hash256));
3111 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3112
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003113 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003114
3115 return 0;
3116}
3117
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003118struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3119 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003120{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003121 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003122
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003123 list_for_each_entry(b, &hdev->blacklist, list) {
3124 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003125 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003126 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003127
3128 return NULL;
3129}
3130
Johan Hedberg35f74982014-02-18 17:14:32 +02003131void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003132{
3133 struct list_head *p, *n;
3134
3135 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003136 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003137
3138 list_del(p);
3139 kfree(b);
3140 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003141}
3142
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003143int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003144{
3145 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003146
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003147 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003148 return -EBADF;
3149
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003150 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003151 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003152
3153 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003154 if (!entry)
3155 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003156
3157 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003158 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003159
3160 list_add(&entry->list, &hdev->blacklist);
3161
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003162 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003163}
3164
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003165int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003166{
3167 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003168
Johan Hedberg35f74982014-02-18 17:14:32 +02003169 if (!bacmp(bdaddr, BDADDR_ANY)) {
3170 hci_blacklist_clear(hdev);
3171 return 0;
3172 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003173
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003174 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003175 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003176 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003177
3178 list_del(&entry->list);
3179 kfree(entry);
3180
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003181 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003182}
3183
Andre Guedes15819a72014-02-03 13:56:18 -03003184/* This function requires the caller holds hdev->lock */
3185struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3186 bdaddr_t *addr, u8 addr_type)
3187{
3188 struct hci_conn_params *params;
3189
3190 list_for_each_entry(params, &hdev->le_conn_params, list) {
3191 if (bacmp(&params->addr, addr) == 0 &&
3192 params->addr_type == addr_type) {
3193 return params;
3194 }
3195 }
3196
3197 return NULL;
3198}
3199
3200/* This function requires the caller holds hdev->lock */
3201void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3202 u16 conn_min_interval, u16 conn_max_interval)
3203{
3204 struct hci_conn_params *params;
3205
3206 params = hci_conn_params_lookup(hdev, addr, addr_type);
3207 if (params) {
3208 params->conn_min_interval = conn_min_interval;
3209 params->conn_max_interval = conn_max_interval;
3210 return;
3211 }
3212
3213 params = kzalloc(sizeof(*params), GFP_KERNEL);
3214 if (!params) {
3215 BT_ERR("Out of memory");
3216 return;
3217 }
3218
3219 bacpy(&params->addr, addr);
3220 params->addr_type = addr_type;
3221 params->conn_min_interval = conn_min_interval;
3222 params->conn_max_interval = conn_max_interval;
3223
3224 list_add(&params->list, &hdev->le_conn_params);
3225
3226 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3227 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3228 conn_max_interval);
3229}
3230
3231/* This function requires the caller holds hdev->lock */
3232void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3233{
3234 struct hci_conn_params *params;
3235
3236 params = hci_conn_params_lookup(hdev, addr, addr_type);
3237 if (!params)
3238 return;
3239
3240 list_del(&params->list);
3241 kfree(params);
3242
3243 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3244}
3245
3246/* This function requires the caller holds hdev->lock */
3247void hci_conn_params_clear(struct hci_dev *hdev)
3248{
3249 struct hci_conn_params *params, *tmp;
3250
3251 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3252 list_del(&params->list);
3253 kfree(params);
3254 }
3255
3256 BT_DBG("All LE connection parameters were removed");
3257}
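/* Illustrative usage sketch (not from the original source; "addr" is
 * a hypothetical bdaddr_t): as the comments above state, callers
 * serialize the connection parameter helpers with hdev->lock:
 *
 *	hci_dev_lock(hdev);
 *	hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 */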
3258
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003259static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003260{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003261 if (status) {
3262 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003263
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003264 hci_dev_lock(hdev);
3265 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3266 hci_dev_unlock(hdev);
3267 return;
3268 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003269}
3270
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003271static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003272{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003273 /* General inquiry access code (GIAC) */
3274 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3275 struct hci_request req;
3276 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003277 int err;
3278
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003279 if (status) {
3280 BT_ERR("Failed to disable LE scanning: status %d", status);
3281 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003282 }
3283
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003284 switch (hdev->discovery.type) {
3285 case DISCOV_TYPE_LE:
3286 hci_dev_lock(hdev);
3287 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3288 hci_dev_unlock(hdev);
3289 break;
3290
3291 case DISCOV_TYPE_INTERLEAVED:
3292 hci_req_init(&req, hdev);
3293
3294 memset(&cp, 0, sizeof(cp));
3295 memcpy(&cp.lap, lap, sizeof(cp.lap));
3296 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3297 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3298
3299 hci_dev_lock(hdev);
3300
3301 hci_inquiry_cache_flush(hdev);
3302
3303 err = hci_req_run(&req, inquiry_complete);
3304 if (err) {
3305 BT_ERR("Inquiry request failed: err %d", err);
3306 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3307 }
3308
3309 hci_dev_unlock(hdev);
3310 break;
3311 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003312}
3313
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003314static void le_scan_disable_work(struct work_struct *work)
3315{
3316 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003317 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003318 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003319 struct hci_request req;
3320 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003321
3322 BT_DBG("%s", hdev->name);
3323
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003324 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003325
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003326 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003327 cp.enable = LE_SCAN_DISABLE;
3328 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003329
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003330 err = hci_req_run(&req, le_scan_disable_work_complete);
3331 if (err)
3332 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003333}
3334
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003335int hci_update_random_address(struct hci_request *req, bool require_privacy,
3336 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003337{
3338 struct hci_dev *hdev = req->hdev;
3339 int err;
3340
3341 /* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003342 * the current RPA has expired or something other than the
3343 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003344 */
3345 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003346 int to;
3347
3348 *own_addr_type = ADDR_LE_DEV_RANDOM;
3349
3350 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003351 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003352 return 0;
3353
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003354 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003355 if (err < 0) {
3356 BT_ERR("%s failed to generate new RPA", hdev->name);
3357 return err;
3358 }
3359
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003360 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003361
3362 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3363 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3364
3365 return 0;
3366 }
3367
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003368 /* If privacy is required but no resolvable private address is
3369 * available, use an unresolvable private address. This is useful
3370 * for active scanning and non-connectable advertising.
3371 */
3372 if (require_privacy) {
3373 bdaddr_t urpa;
3374
3375 get_random_bytes(&urpa, 6);
3376 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3377
3378 *own_addr_type = ADDR_LE_DEV_RANDOM;
3379 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3380 return 0;
3381 }
3382
Johan Hedbergebd3a742014-02-23 19:42:21 +02003383 /* If forcing of the static address is in use or there is no
3384 * public address, use the static address as the random address
3385 * (but skip the HCI command if the current random address is
3386 * already the static one).
3387 */
3388 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3389 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3390 *own_addr_type = ADDR_LE_DEV_RANDOM;
3391 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3392 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3393 &hdev->static_addr);
3394 return 0;
3395 }
3396
3397 /* Neither privacy nor a static address is in use, so use a
3398 * public address.
3399 */
3400 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3401
3402 return 0;
3403}
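/* Illustrative usage sketch (assumption; ex_complete is a hypothetical
 * callback): a request builder picks the own-address type before
 * queueing advertising or scanning commands:
 *
 *	struct hci_request req;
 *	u8 own_addr_type;
 *
 *	hci_req_init(&req, hdev);
 *	if (hci_update_random_address(&req, false, &own_addr_type) < 0)
 *		return;
 *	hci_req_run(&req, ex_complete);
 */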
3404
David Herrmann9be0dab2012-04-22 14:39:57 +02003405/* Alloc HCI device */
3406struct hci_dev *hci_alloc_dev(void)
3407{
3408 struct hci_dev *hdev;
3409
3410 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3411 if (!hdev)
3412 return NULL;
3413
David Herrmannb1b813d2012-04-22 14:39:58 +02003414 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3415 hdev->esco_type = (ESCO_HV1);
3416 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003417 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3418 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003419 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3420 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003421
David Herrmannb1b813d2012-04-22 14:39:58 +02003422 hdev->sniff_max_interval = 800;
3423 hdev->sniff_min_interval = 80;
3424
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003425 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003426 hdev->le_scan_interval = 0x0060;
3427 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003428 hdev->le_conn_min_interval = 0x0028;
3429 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003430
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003431 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3432
David Herrmannb1b813d2012-04-22 14:39:58 +02003433 mutex_init(&hdev->lock);
3434 mutex_init(&hdev->req_lock);
3435
3436 INIT_LIST_HEAD(&hdev->mgmt_pending);
3437 INIT_LIST_HEAD(&hdev->blacklist);
3438 INIT_LIST_HEAD(&hdev->uuids);
3439 INIT_LIST_HEAD(&hdev->link_keys);
3440 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003441 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003442 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003443 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003444 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003445
3446 INIT_WORK(&hdev->rx_work, hci_rx_work);
3447 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3448 INIT_WORK(&hdev->tx_work, hci_tx_work);
3449 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003450
David Herrmannb1b813d2012-04-22 14:39:58 +02003451 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3452 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3453 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3454
David Herrmannb1b813d2012-04-22 14:39:58 +02003455 skb_queue_head_init(&hdev->rx_q);
3456 skb_queue_head_init(&hdev->cmd_q);
3457 skb_queue_head_init(&hdev->raw_q);
3458
3459 init_waitqueue_head(&hdev->req_wait_q);
3460
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003461 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003462
David Herrmannb1b813d2012-04-22 14:39:58 +02003463 hci_init_sysfs(hdev);
3464 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003465
3466 return hdev;
3467}
3468EXPORT_SYMBOL(hci_alloc_dev);
3469
3470/* Free HCI device */
3471void hci_free_dev(struct hci_dev *hdev)
3472{
David Herrmann9be0dab2012-04-22 14:39:57 +02003473 /* will free via device release */
3474 put_device(&hdev->dev);
3475}
3476EXPORT_SYMBOL(hci_free_dev);
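/* Illustrative driver-side sketch (assumption; the ex_* callbacks are
 * hypothetical): the usual allocate, configure, register sequence for
 * a transport driver, with hci_free_dev() on the failure path:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_USB;
 *	hdev->open = ex_open;
 *	hdev->close = ex_close;
 *	hdev->send = ex_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */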
3477
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478/* Register HCI device */
3479int hci_register_dev(struct hci_dev *hdev)
3480{
David Herrmannb1b813d2012-04-22 14:39:58 +02003481 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482
David Herrmann010666a2012-01-07 15:47:07 +01003483 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 return -EINVAL;
3485
Mat Martineau08add512011-11-02 16:18:36 -07003486 /* Do not allow HCI_AMP devices to register at index 0,
3487 * so the index can be used as the AMP controller ID.
3488 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003489 switch (hdev->dev_type) {
3490 case HCI_BREDR:
3491 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3492 break;
3493 case HCI_AMP:
3494 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3495 break;
3496 default:
3497 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003499
Sasha Levin3df92b32012-05-27 22:36:56 +02003500 if (id < 0)
3501 return id;
3502
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 sprintf(hdev->name, "hci%d", id);
3504 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003505
3506 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3507
Kees Cookd8537542013-07-03 15:04:57 -07003508 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3509 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003510 if (!hdev->workqueue) {
3511 error = -ENOMEM;
3512 goto err;
3513 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003514
Kees Cookd8537542013-07-03 15:04:57 -07003515 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3516 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003517 if (!hdev->req_workqueue) {
3518 destroy_workqueue(hdev->workqueue);
3519 error = -ENOMEM;
3520 goto err;
3521 }
3522
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003523 if (!IS_ERR_OR_NULL(bt_debugfs))
3524 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3525
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003526 dev_set_name(&hdev->dev, "%s", hdev->name);
3527
Johan Hedberg99780a72014-02-18 10:40:07 +02003528 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3529 CRYPTO_ALG_ASYNC);
3530 if (IS_ERR(hdev->tfm_aes)) {
3531 BT_ERR("Unable to create crypto context");
3532 error = PTR_ERR(hdev->tfm_aes);
3533 hdev->tfm_aes = NULL;
3534 goto err_wqueue;
3535 }
3536
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003537 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003538 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003539 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003541 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003542 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3543 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003544 if (hdev->rfkill) {
3545 if (rfkill_register(hdev->rfkill) < 0) {
3546 rfkill_destroy(hdev->rfkill);
3547 hdev->rfkill = NULL;
3548 }
3549 }
3550
Johan Hedberg5e130362013-09-13 08:58:17 +03003551 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3552 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3553
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003554 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003555 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003556
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003557 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003558 /* Assume BR/EDR support until proven otherwise (such as
3559 * through reading supported features during init).
3560 */
3561 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3562 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003563
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003564 write_lock(&hci_dev_list_lock);
3565 list_add(&hdev->list, &hci_dev_list);
3566 write_unlock(&hci_dev_list_lock);
3567
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003569 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570
Johan Hedberg19202572013-01-14 22:33:51 +02003571 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003572
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003574
Johan Hedberg99780a72014-02-18 10:40:07 +02003575err_tfm:
3576 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003577err_wqueue:
3578 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003579 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003580err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003581 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003582
David Herrmann33ca9542011-10-08 14:58:49 +02003583 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584}
3585EXPORT_SYMBOL(hci_register_dev);
3586
3587/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003588void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589{
Sasha Levin3df92b32012-05-27 22:36:56 +02003590 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003591
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003592 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
Johan Hovold94324962012-03-15 14:48:41 +01003594 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3595
Sasha Levin3df92b32012-05-27 22:36:56 +02003596 id = hdev->id;
3597
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003598 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003600 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601
3602 hci_dev_do_close(hdev);
3603
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303604 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003605 kfree_skb(hdev->reassembly[i]);
3606
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003607 cancel_work_sync(&hdev->power_on);
3608
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003609 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003610 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003611 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003612 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003613 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003614 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003615
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003616 /* mgmt_index_removed should take care of emptying the
3617 * pending list */
3618 BUG_ON(!list_empty(&hdev->mgmt_pending));
3619
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 hci_notify(hdev, HCI_DEV_UNREG);
3621
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003622 if (hdev->rfkill) {
3623 rfkill_unregister(hdev->rfkill);
3624 rfkill_destroy(hdev->rfkill);
3625 }
3626
Johan Hedberg99780a72014-02-18 10:40:07 +02003627 if (hdev->tfm_aes)
3628 crypto_free_blkcipher(hdev->tfm_aes);
3629
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003630 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003631
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003632 debugfs_remove_recursive(hdev->debugfs);
3633
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003634 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003635 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003636
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003637 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003638 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003639 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003640 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003641 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003642 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003643 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003644 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003645 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003646
David Herrmanndc946bd2012-01-07 15:47:24 +01003647 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003648
3649 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650}
3651EXPORT_SYMBOL(hci_unregister_dev);
3652
3653/* Suspend HCI device */
3654int hci_suspend_dev(struct hci_dev *hdev)
3655{
3656 hci_notify(hdev, HCI_DEV_SUSPEND);
3657 return 0;
3658}
3659EXPORT_SYMBOL(hci_suspend_dev);
3660
3661/* Resume HCI device */
3662int hci_resume_dev(struct hci_dev *hdev)
3663{
3664 hci_notify(hdev, HCI_DEV_RESUME);
3665 return 0;
3666}
3667EXPORT_SYMBOL(hci_resume_dev);
3668
Marcel Holtmann76bca882009-11-18 00:40:39 +01003669/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003670int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003671{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003672 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003673 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003674 kfree_skb(skb);
3675 return -ENXIO;
3676 }
3677
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003678 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003679 bt_cb(skb)->incoming = 1;
3680
3681 /* Time stamp */
3682 __net_timestamp(skb);
3683
Marcel Holtmann76bca882009-11-18 00:40:39 +01003684 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003685 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003686
Marcel Holtmann76bca882009-11-18 00:40:39 +01003687 return 0;
3688}
3689EXPORT_SYMBOL(hci_recv_frame);
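/* Illustrative driver-side sketch (assumption): a transport that
 * receives complete frames tags the packet type and hands the skb up:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */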
3690
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303691static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003692 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303693{
3694 int len = 0;
3695 int hlen = 0;
3696 int remain = count;
3697 struct sk_buff *skb;
3698 struct bt_skb_cb *scb;
3699
3700 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003701 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303702 return -EILSEQ;
3703
3704 skb = hdev->reassembly[index];
3705
3706 if (!skb) {
3707 switch (type) {
3708 case HCI_ACLDATA_PKT:
3709 len = HCI_MAX_FRAME_SIZE;
3710 hlen = HCI_ACL_HDR_SIZE;
3711 break;
3712 case HCI_EVENT_PKT:
3713 len = HCI_MAX_EVENT_SIZE;
3714 hlen = HCI_EVENT_HDR_SIZE;
3715 break;
3716 case HCI_SCODATA_PKT:
3717 len = HCI_MAX_SCO_SIZE;
3718 hlen = HCI_SCO_HDR_SIZE;
3719 break;
3720 }
3721
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003722 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303723 if (!skb)
3724 return -ENOMEM;
3725
3726 scb = (void *) skb->cb;
3727 scb->expect = hlen;
3728 scb->pkt_type = type;
3729
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303730 hdev->reassembly[index] = skb;
3731 }
3732
3733 while (count) {
3734 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003735 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303736
3737 memcpy(skb_put(skb, len), data, len);
3738
3739 count -= len;
3740 data += len;
3741 scb->expect -= len;
3742 remain = count;
3743
3744 switch (type) {
3745 case HCI_EVENT_PKT:
3746 if (skb->len == HCI_EVENT_HDR_SIZE) {
3747 struct hci_event_hdr *h = hci_event_hdr(skb);
3748 scb->expect = h->plen;
3749
3750 if (skb_tailroom(skb) < scb->expect) {
3751 kfree_skb(skb);
3752 hdev->reassembly[index] = NULL;
3753 return -ENOMEM;
3754 }
3755 }
3756 break;
3757
3758 case HCI_ACLDATA_PKT:
3759 if (skb->len == HCI_ACL_HDR_SIZE) {
3760 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3761 scb->expect = __le16_to_cpu(h->dlen);
3762
3763 if (skb_tailroom(skb) < scb->expect) {
3764 kfree_skb(skb);
3765 hdev->reassembly[index] = NULL;
3766 return -ENOMEM;
3767 }
3768 }
3769 break;
3770
3771 case HCI_SCODATA_PKT:
3772 if (skb->len == HCI_SCO_HDR_SIZE) {
3773 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3774 scb->expect = h->dlen;
3775
3776 if (skb_tailroom(skb) < scb->expect) {
3777 kfree_skb(skb);
3778 hdev->reassembly[index] = NULL;
3779 return -ENOMEM;
3780 }
3781 }
3782 break;
3783 }
3784
3785 if (scb->expect == 0) {
3786 /* Complete frame */
3787
3788 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003789 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303790
3791 hdev->reassembly[index] = NULL;
3792 return remain;
3793 }
3794 }
3795
3796 return remain;
3797}
3798
Marcel Holtmannef222012007-07-11 06:42:04 +02003799int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3800{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303801 int rem = 0;
3802
Marcel Holtmannef222012007-07-11 06:42:04 +02003803 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3804 return -EILSEQ;
3805
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003806 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003807 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303808 if (rem < 0)
3809 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003810
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303811 data += (count - rem);
3812 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003813 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003814
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303815 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003816}
3817EXPORT_SYMBOL(hci_recv_fragment);
3818
Suraj Sumangala99811512010-07-14 13:02:19 +05303819#define STREAM_REASSEMBLY 0
3820
3821int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3822{
3823 int type;
3824 int rem = 0;
3825
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003826 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303827 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3828
3829 if (!skb) {
3830 struct { char type; } *pkt;
3831
3832 /* Start of the frame */
3833 pkt = data;
3834 type = pkt->type;
3835
3836 data++;
3837 count--;
3838 } else
3839 type = bt_cb(skb)->pkt_type;
3840
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003841 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003842 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303843 if (rem < 0)
3844 return rem;
3845
3846 data += (count - rem);
3847 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003848 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303849
3850 return rem;
3851}
3852EXPORT_SYMBOL(hci_recv_stream_fragment);
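/* Illustrative driver-side sketch (assumption; buf and len are
 * hypothetical): a UART-style transport with no packet boundaries
 * feeds raw bytes to the stream reassembler, which recovers the
 * packet type from the leading type byte:
 *
 *	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *		BT_ERR("Frame reassembly failed");
 */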
3853
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854/* ---- Interface to upper protocols ---- */
3855
Linus Torvalds1da177e2005-04-16 15:20:36 -07003856int hci_register_cb(struct hci_cb *cb)
3857{
3858 BT_DBG("%p name %s", cb, cb->name);
3859
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003860 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003862 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863
3864 return 0;
3865}
3866EXPORT_SYMBOL(hci_register_cb);
3867
3868int hci_unregister_cb(struct hci_cb *cb)
3869{
3870 BT_DBG("%p name %s", cb, cb->name);
3871
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003872 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003874 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875
3876 return 0;
3877}
3878EXPORT_SYMBOL(hci_unregister_cb);
3879
Marcel Holtmann51086992013-10-10 14:54:19 -07003880static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003882 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003884 /* Time stamp */
3885 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003887 /* Send copy to monitor */
3888 hci_send_to_monitor(hdev, skb);
3889
3890 if (atomic_read(&hdev->promisc)) {
3891 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003892 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 }
3894
3895 /* Get rid of skb owner, prior to sending to the driver. */
3896 skb_orphan(skb);
3897
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003898 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003899 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900}
3901
Johan Hedberg3119ae92013-03-05 20:37:44 +02003902void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3903{
3904 skb_queue_head_init(&req->cmd_q);
3905 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003906 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003907}
3908
3909int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3910{
3911 struct hci_dev *hdev = req->hdev;
3912 struct sk_buff *skb;
3913 unsigned long flags;
3914
3915 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3916
Andre Guedes5d73e032013-03-08 11:20:16 -03003917 /* If an error occurred during request building, remove all HCI
3918 * commands queued on the HCI request queue.
3919 */
3920 if (req->err) {
3921 skb_queue_purge(&req->cmd_q);
3922 return req->err;
3923 }
3924
Johan Hedberg3119ae92013-03-05 20:37:44 +02003925 /* Do not allow empty requests */
3926 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003927 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003928
3929 skb = skb_peek_tail(&req->cmd_q);
3930 bt_cb(skb)->req.complete = complete;
3931
3932 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3933 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3934 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3935
3936 queue_work(hdev->workqueue, &hdev->cmd_work);
3937
3938 return 0;
3939}
3940
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003941static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003942 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943{
3944 int len = HCI_COMMAND_HDR_SIZE + plen;
3945 struct hci_command_hdr *hdr;
3946 struct sk_buff *skb;
3947
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003949 if (!skb)
3950 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951
3952 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003953 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 hdr->plen = plen;
3955
3956 if (plen)
3957 memcpy(skb_put(skb, plen), param, plen);
3958
3959 BT_DBG("skb len %d", skb->len);
3960
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003961 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003962
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003963 return skb;
3964}
3965
3966/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003967int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3968 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003969{
3970 struct sk_buff *skb;
3971
3972 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3973
3974 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3975 if (!skb) {
3976 BT_ERR("%s no memory for command", hdev->name);
3977 return -ENOMEM;
3978 }
3979
Johan Hedberg11714b32013-03-05 20:37:47 +02003980 /* Stand-alone HCI commands must be flagged as
3981 * single-command requests.
3982 */
3983 bt_cb(skb)->req.start = true;
3984
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003986 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987
3988 return 0;
3989}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990
Johan Hedberg71c76a12013-03-05 20:37:46 +02003991/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003992void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3993 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003994{
3995 struct hci_dev *hdev = req->hdev;
3996 struct sk_buff *skb;
3997
3998 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3999
Andre Guedes34739c12013-03-08 11:20:18 -03004000 /* If an error occurred during request building, there is no point in
4001 * queueing the HCI command. We can simply return.
4002 */
4003 if (req->err)
4004 return;
4005
Johan Hedberg71c76a12013-03-05 20:37:46 +02004006 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4007 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004008 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4009 hdev->name, opcode);
4010 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004011 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004012 }
4013
4014 if (skb_queue_empty(&req->cmd_q))
4015 bt_cb(skb)->req.start = true;
4016
Johan Hedberg02350a72013-04-03 21:50:29 +03004017 bt_cb(skb)->req.event = event;
4018
Johan Hedberg71c76a12013-03-05 20:37:46 +02004019 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004020}
4021
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004022void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4023 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004024{
4025 hci_req_add_ev(req, opcode, plen, param, 0);
4026}
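/* Illustrative sketch (assumption; ex_complete and cp are
 * hypothetical): building and running a request with the helpers
 * above, including a completion callback:
 *
 *	static void ex_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, ex_complete);
 */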
4027
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004029void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030{
4031 struct hci_command_hdr *hdr;
4032
4033 if (!hdev->sent_cmd)
4034 return NULL;
4035
4036 hdr = (void *) hdev->sent_cmd->data;
4037
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004038 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039 return NULL;
4040
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004041 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042
4043 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4044}
4045
4046/* Send ACL data */
4047static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4048{
4049 struct hci_acl_hdr *hdr;
4050 int len = skb->len;
4051
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004052 skb_push(skb, HCI_ACL_HDR_SIZE);
4053 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004054 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004055 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4056 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057}
4058
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004059static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004060 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004062 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063 struct hci_dev *hdev = conn->hdev;
4064 struct sk_buff *list;
4065
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004066 skb->len = skb_headlen(skb);
4067 skb->data_len = 0;
4068
4069 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004070
4071 switch (hdev->dev_type) {
4072 case HCI_BREDR:
4073 hci_add_acl_hdr(skb, conn->handle, flags);
4074 break;
4075 case HCI_AMP:
4076 hci_add_acl_hdr(skb, chan->handle, flags);
4077 break;
4078 default:
4079 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4080 return;
4081 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004082
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004083 list = skb_shinfo(skb)->frag_list;
4084 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085 /* Non-fragmented */
4086 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4087
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004088 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 } else {
4090 /* Fragmented */
4091 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4092
4093 skb_shinfo(skb)->frag_list = NULL;
4094
4095 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004096 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004098 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004099
4100 flags &= ~ACL_START;
4101 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 do {
4103 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004104
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004105 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004106 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107
4108 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4109
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004110 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111 } while (list);
4112
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004113 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004115}
4116
4117void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4118{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004119 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004120
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004121 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004122
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004123 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004125 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126}
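/* Illustrative sketch (assumption; "hchan" is a hypothetical
 * struct hci_chan pointer): L2CAP hands a frame down with
 *
 *	hci_send_acl(hchan, skb, ACL_START_NO_FLUSH);
 *
 * and hci_queue_acl() above tags any frag_list fragments as ACL_CONT.
 */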
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127
4128/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004129void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130{
4131 struct hci_dev *hdev = conn->hdev;
4132 struct hci_sco_hdr hdr;
4133
4134 BT_DBG("%s len %d", hdev->name, skb->len);
4135
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004136 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137 hdr.dlen = skb->len;
4138
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004139 skb_push(skb, HCI_SCO_HDR_SIZE);
4140 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004141 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004143 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004144
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004146 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148
4149/* ---- HCI TX task (outgoing data) ---- */
4150
4151/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004152static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4153 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154{
4155 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004156 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004157 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004159 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004161
4162 rcu_read_lock();
4163
4164 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004165 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004167
4168 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4169 continue;
4170
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 num++;
4172
4173 if (c->sent < min) {
4174 min = c->sent;
4175 conn = c;
4176 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004177
4178 if (hci_conn_num(hdev, type) == num)
4179 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180 }
4181
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004182 rcu_read_unlock();
4183
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004185 int cnt, q;
4186
4187 switch (conn->type) {
4188 case ACL_LINK:
4189 cnt = hdev->acl_cnt;
4190 break;
4191 case SCO_LINK:
4192 case ESCO_LINK:
4193 cnt = hdev->sco_cnt;
4194 break;
4195 case LE_LINK:
4196 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4197 break;
4198 default:
4199 cnt = 0;
4200 BT_ERR("Unknown link type");
4201 }
4202
4203 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 *quote = q ? q : 1;
4205 } else
4206 *quote = 0;
4207
4208 BT_DBG("conn %p quote %d", conn, *quote);
4209 return conn;
4210}
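/* Worked example (illustrative): with hdev->acl_cnt == 8 and three
 * ACL connections holding queued data, the connection with the lowest
 * sent count is picked and granted a quote of 8 / 3 == 2 packets for
 * this scheduling round.
 */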

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
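
/*
 * Channel selection above is strict-priority first: only channels whose
 * head-of-queue skb carries the highest priority seen so far stay in
 * the running (a higher priority resets num and min), and ties are
 * broken in favour of the channel on the connection with the fewest
 * outstanding packets (conn->sent).
 */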

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
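
/*
 * Starvation avoidance: after a TX round, channels that managed to
 * send (chan->sent != 0) merely have their per-round counter reset,
 * while channels that sent nothing get the head packet of their queue
 * promoted to HCI_PRIO_MAX - 1 so they outrank ordinary traffic in the
 * next hci_chan_sent() pass.
 */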

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
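
/*
 * Worked example (numbers are illustrative): HCI_ACL_HDR_SIZE is 4, so
 * a 343-byte skb carries a 339-byte payload; with hdev->block_len == 64
 * it occupies DIV_ROUND_UP(339, 64) == 6 controller buffer blocks.
 */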

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
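
/*
 * The 40.9 seconds quoted above is the largest link supervision
 * timeout the controller can be configured with (0xffff slots *
 * 0.625 ms), so HCI_ACL_TX_TIMEOUT is chosen above that bound
 * (45 seconds in this tree) to avoid declaring a link dead while
 * supervision could still recover it.
 */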

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
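
/*
 * Note the peek-then-dequeue pattern above: the skb is only removed
 * from the channel queue once it is certain to be sent, so a priority
 * drop at the head of the queue leaves the packet in place for a later
 * round instead of discarding it.
 */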

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
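
/*
 * Block-based flow control accounts in controller buffer blocks rather
 * than packets: the quota from hci_chan_sent() and the sent counters
 * are all adjusted by __get_blocks(hdev, skb). When a dequeued packet
 * needs more blocks than remain, the function bails out early; note
 * that this early return appears to leave the already-dequeued skb
 * neither sent nor freed, a quirk worth keeping in mind when reading
 * this path.
 */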

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
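
/*
 * SCO scheduling uses the per-connection hci_low_sent() picker rather
 * than the per-channel, priority-aware one, and conn->sent is wrapped
 * back to zero by hand once it saturates at ~0.
 */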

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
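
/*
 * Controllers that report no dedicated LE buffer pool (le_pkts == 0)
 * share the ACL pool, which is why cnt falls back to hdev->acl_cnt
 * above and the leftover count is written back to whichever counter
 * was actually consumed.
 */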

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
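
/*
 * When the device is in user channel mode the scheduling above is
 * bypassed entirely; only the raw queue is drained, since the user
 * space owner of the channel is expected to do its own flow control.
 */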

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
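
/*
 * The 16-bit handle field packs a 12-bit connection handle and 4 bits
 * of flags (packet boundary and broadcast): hci_handle() masks the low
 * 12 bits and hci_flags() takes the top 4. For example, a field of
 * 0x2001 yields handle 0x001 and flags 0x2.
 */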

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
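
/*
 * Commands that belong to one request are queued back to back in
 * cmd_q, with bt_cb(skb)->req.start set only on the first command of
 * each request. An empty queue, or a start marker at the head, means
 * the previous request has fully drained.
 */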

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
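
/*
 * On a failed command the loop above flushes the remainder of the
 * current request from cmd_q, stopping at the next req.start marker so
 * the following request stays intact; the complete callback found
 * along the way is then invoked once with the given status.
 */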

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
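
/*
 * Note that while HCI_INIT is set only event packets are processed;
 * stray ACL/SCO data is dropped so the init sequence sees its command
 * completions without feeding data into half-initialized upper layers.
 */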

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
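
/*
 * Command flow control: hdev->cmd_cnt holds the credits granted by the
 * controller via Command Complete/Status events; a clone of the
 * in-flight command is kept in hdev->sent_cmd for completion matching
 * and possible resend. The cmd_timer watchdog is re-armed for
 * HCI_CMD_TIMEOUT (2 seconds in this tree) on each send, except while
 * HCI_RESET is in progress, where it is deliberately stopped.
 */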