/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

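/* Forward device events (register, unregister, up, down, etc.) to the
 * HCI socket layer so that monitoring user space is notified.
 */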
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

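/* Note: the dut_mode entry is a simple boolean attribute; reading it
 * reports whether Device Under Test mode is active as "Y\n" or "N\n".
 */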
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

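/* Writing "1" enables Device Under Test mode via HCI_OP_ENABLE_DUT_MODE.
 * There is no dedicated command to leave DUT mode, so writing "0" issues
 * HCI_OP_RESET instead. Either way the controller must be up (HCI_UP).
 */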
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

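/* Dump the LMP feature pages (plus the LE feature page on LE capable
 * controllers) as rows of eight hex octets per page.
 */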
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

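/* Dump the stored LE Long Term Keys. The key material is sensitive,
 * which is why the debugfs entry is created with mode 0400 below in
 * __hci_init().
 */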
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

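/* Retrieve the last received event (hdev->recv_evt) and verify that it
 * is what the synchronous request waits for: either a specific event
 * code, or a Command Complete event matching the given opcode.
 */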
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

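/* Send a single HCI command and block until the matching completion
 * event arrives or the timeout expires. Callers are expected to hold
 * hci_req_lock(); returns the event skb or an ERR_PTR() on failure.
 */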
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

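/* Entry point for callers that do not already hold hci_req_lock();
 * serializes synchronous requests per device and rejects requests
 * while the device is down.
 */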
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

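/* Pick the best supported inquiry result mode: 0x02 for extended
 * inquiry results, 0x01 for results with RSSI, 0x00 for standard
 * results. The manufacturer/revision checks below appear to cover
 * controllers that support RSSI results without advertising the
 * corresponding LMP feature bit.
 */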
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

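/* Build the event mask from the controller's feature bits: start from
 * a conservative default and only unmask events the controller can
 * actually generate. A separate LE event mask is set when LE is
 * supported.
 */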
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

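/* Run the staged initialization: init1 covers reset and the basic
 * reads, while init2 through init4 configure BR/EDR and LE behavior
 * based on what the earlier stages discovered. The debugfs entries
 * are created once, while the controller is still in HCI_SETUP.
 */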
Johan Hedberg2177bab2013-03-05 20:37:43 +02001423static int __hci_init(struct hci_dev *hdev)
1424{
1425 int err;
1426
1427 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1428 if (err < 0)
1429 return err;
1430
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001431 /* The Device Under Test (DUT) mode is special and available for
1432 * all controller types. So just create it early on.
1433 */
1434 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1435 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1436 &dut_mode_fops);
1437 }
1438
Johan Hedberg2177bab2013-03-05 20:37:43 +02001439 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1440 * BR/EDR/LE type controllers. AMP controllers only need the
1441 * first stage init.
1442 */
1443 if (hdev->dev_type != HCI_BREDR)
1444 return 0;
1445
1446 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1447 if (err < 0)
1448 return err;
1449
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001450 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1451 if (err < 0)
1452 return err;
1453
	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}

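/* Request helpers for the HCISETSCAN, HCISETAUTH, HCISETENCRYPT and
 * HCISETLINKPOL ioctls below: each queues a single command on the
 * request constructed by hci_req_sync().
 */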
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

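/* Report whether a discovery procedure is currently active, i.e.
 * the state machine is in the finding or name resolving phase.
 */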
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

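/* Move the discovery state machine to a new state and notify the
 * management interface when discovery effectively starts or stops.
 */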
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

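/* Drop all entries from the inquiry cache and reinitialize its
 * unknown and resolve lists.
 */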
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

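/* Add an inquiry result to the cache or refresh an existing entry.
 * Returns false if the entry could not be stored or still needs name
 * resolution, true otherwise.
 */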
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

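/* Handle the HCIINQUIRY ioctl: run a fresh inquiry if the cache is
 * stale, empty or a flush was requested, then copy the cached
 * results to user space.
 */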
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until the Inquiry procedure finishes (HCI_INQUIRY
		 * flag is cleared). If it is interrupted by a signal,
		 * return -EINTR (dropping the device reference on the way
		 * out).
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

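/* Bring the device up: call the driver's open callback, run the
 * vendor setup stage during initial setup, and perform the HCI init
 * sequence unless the device is in raw or user channel mode.
 */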
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

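/* Bring the device down: flush pending work and queues, reset the
 * controller where the quirks allow it, and call the driver's close
 * callback.
 */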
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

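/* Handle the HCIDEVRESET ioctl: drop queued packets, flush the
 * inquiry cache and connection hash, and issue an HCI reset while
 * keeping the device up.
 */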
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

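/* Deferred power-on work: open the device, then re-check the error
 * conditions that were ignored during setup (rfkill, missing
 * address) and power back off or arm the auto-off timer as needed.
 */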
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

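/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the bonding requirements
 * of both sides of the connection.
 */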
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

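/* Store a new BR/EDR link key, replacing any existing key for the
 * same remote address, and notify the management interface when the
 * key is new.
 */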
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

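/* Store an SMP short term or long term key for an LE address,
 * replacing any existing entry for the same address and address
 * type.
 */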
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timeout handler: the controller failed to respond to
 * the last command in time. */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

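/* Add a device address to the blacklist and notify the management
 * interface. BDADDR_ANY is rejected here; hci_blacklist_del()
 * instead treats it as a request to clear the whole list.
 */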
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002848int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002849{
2850 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002851
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002852 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002853 return -EBADF;
2854
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002855 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002856 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002857
2858 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002859 if (!entry)
2860 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002861
2862 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002863 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002864
2865 list_add(&entry->list, &hdev->blacklist);
2866
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002867 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002868}
2869
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002870int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002871{
2872 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002873
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002874 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002875 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002876
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002877 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002878 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002879 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002880
2881 list_del(&entry->list);
2882 kfree(entry);
2883
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002884 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002885}
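/* Usage sketch (illustrative): blocking and unblocking a remote device
 * from mgmt boils down to
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 *
 * Note the asymmetry: hci_blacklist_add() rejects BDADDR_ANY with
 * -EBADF, while hci_blacklist_del() treats BDADDR_ANY as a request to
 * clear the whole list.
 */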
2886
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002887static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002888{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002889 if (status) {
2890 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002891
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002892 hci_dev_lock(hdev);
2893 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2894 hci_dev_unlock(hdev);
2895 return;
2896 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002897}
2898
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002899static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002900{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002901 /* General inquiry access code (GIAC) */
2902 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2903 struct hci_request req;
2904 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002905 int err;
2906
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002907 if (status) {
2908 BT_ERR("Failed to disable LE scanning: status %d", status);
2909 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002910 }
2911
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002912 switch (hdev->discovery.type) {
2913 case DISCOV_TYPE_LE:
2914 hci_dev_lock(hdev);
2915 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2916 hci_dev_unlock(hdev);
2917 break;
2918
2919 case DISCOV_TYPE_INTERLEAVED:
2920 hci_req_init(&req, hdev);
2921
2922 memset(&cp, 0, sizeof(cp));
2923 memcpy(&cp.lap, lap, sizeof(cp.lap));
2924 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2925 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2926
2927 hci_dev_lock(hdev);
2928
2929 hci_inquiry_cache_flush(hdev);
2930
2931 err = hci_req_run(&req, inquiry_complete);
2932 if (err) {
2933 BT_ERR("Inquiry request failed: err %d", err);
2934 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2935 }
2936
2937 hci_dev_unlock(hdev);
2938 break;
2939 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002940}
2941
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002942static void le_scan_disable_work(struct work_struct *work)
2943{
2944 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002945 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002946 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002947 struct hci_request req;
2948 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002949
2950 BT_DBG("%s", hdev->name);
2951
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002952 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002953
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002954 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002955 cp.enable = LE_SCAN_DISABLE;
2956 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002957
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002958 err = hci_req_run(&req, le_scan_disable_work_complete);
2959 if (err)
2960 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002961}
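/* Discovery stop sequence (summary): le_scan_disable_work() submits the
 * LE scan disable command; le_scan_disable_work_complete() then either
 * marks discovery stopped (DISCOV_TYPE_LE) or chains a BR/EDR inquiry
 * for interleaved discovery, whose completion is in turn handled by
 * inquiry_complete() above.
 */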
2962
David Herrmann9be0dab2012-04-22 14:39:57 +02002963/* Alloc HCI device */
2964struct hci_dev *hci_alloc_dev(void)
2965{
2966 struct hci_dev *hdev;
2967
2968 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2969 if (!hdev)
2970 return NULL;
2971
David Herrmannb1b813d2012-04-22 14:39:58 +02002972 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2973 hdev->esco_type = (ESCO_HV1);
2974 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002975 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2976 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002977 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2978 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002979
David Herrmannb1b813d2012-04-22 14:39:58 +02002980 hdev->sniff_max_interval = 800;
2981 hdev->sniff_min_interval = 80;
2982
Marcel Holtmannbef64732013-10-11 08:23:19 -07002983 hdev->le_scan_interval = 0x0060;
2984 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002985 hdev->le_conn_min_interval = 0x0028;
2986 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002987
David Herrmannb1b813d2012-04-22 14:39:58 +02002988 mutex_init(&hdev->lock);
2989 mutex_init(&hdev->req_lock);
2990
2991 INIT_LIST_HEAD(&hdev->mgmt_pending);
2992 INIT_LIST_HEAD(&hdev->blacklist);
2993 INIT_LIST_HEAD(&hdev->uuids);
2994 INIT_LIST_HEAD(&hdev->link_keys);
2995 INIT_LIST_HEAD(&hdev->long_term_keys);
2996 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002997 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002998
2999 INIT_WORK(&hdev->rx_work, hci_rx_work);
3000 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3001 INIT_WORK(&hdev->tx_work, hci_tx_work);
3002 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003003
David Herrmannb1b813d2012-04-22 14:39:58 +02003004 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3005 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3006 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3007
David Herrmannb1b813d2012-04-22 14:39:58 +02003008 skb_queue_head_init(&hdev->rx_q);
3009 skb_queue_head_init(&hdev->cmd_q);
3010 skb_queue_head_init(&hdev->raw_q);
3011
3012 init_waitqueue_head(&hdev->req_wait_q);
3013
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003014 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003015
David Herrmannb1b813d2012-04-22 14:39:58 +02003016 hci_init_sysfs(hdev);
3017 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003018
3019 return hdev;
3020}
3021EXPORT_SYMBOL(hci_alloc_dev);
3022
3023/* Free HCI device */
3024void hci_free_dev(struct hci_dev *hdev)
3025{
David Herrmann9be0dab2012-04-22 14:39:57 +02003026 /* will free via device release */
3027 put_device(&hdev->dev);
3028}
3029EXPORT_SYMBOL(hci_free_dev);
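/* Driver lifecycle sketch (illustrative; my_open/my_close/my_send are
 * hypothetical transport callbacks, not defined in this file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below fails with -EINVAL unless both the open and
 * close callbacks are set; teardown is hci_unregister_dev() followed
 * by hci_free_dev().
 */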
3030
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031/* Register HCI device */
3032int hci_register_dev(struct hci_dev *hdev)
3033{
David Herrmannb1b813d2012-04-22 14:39:58 +02003034 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035
David Herrmann010666a2012-01-07 15:47:07 +01003036 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 return -EINVAL;
3038
Mat Martineau08add512011-11-02 16:18:36 -07003039 /* Do not allow HCI_AMP devices to register at index 0,
3040 * so the index can be used as the AMP controller ID.
3041 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003042 switch (hdev->dev_type) {
3043 case HCI_BREDR:
3044 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3045 break;
3046 case HCI_AMP:
3047 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3048 break;
3049 default:
3050 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003052
Sasha Levin3df92b32012-05-27 22:36:56 +02003053 if (id < 0)
3054 return id;
3055
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 sprintf(hdev->name, "hci%d", id);
3057 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003058
3059 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3060
Kees Cookd8537542013-07-03 15:04:57 -07003061 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3062 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003063 if (!hdev->workqueue) {
3064 error = -ENOMEM;
3065 goto err;
3066 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003067
Kees Cookd8537542013-07-03 15:04:57 -07003068 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3069 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003070 if (!hdev->req_workqueue) {
3071 destroy_workqueue(hdev->workqueue);
3072 error = -ENOMEM;
3073 goto err;
3074 }
3075
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003076 if (!IS_ERR_OR_NULL(bt_debugfs))
3077 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3078
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003079 dev_set_name(&hdev->dev, "%s", hdev->name);
3080
3081 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003082 if (error < 0)
3083 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003085 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003086 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3087 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003088 if (hdev->rfkill) {
3089 if (rfkill_register(hdev->rfkill) < 0) {
3090 rfkill_destroy(hdev->rfkill);
3091 hdev->rfkill = NULL;
3092 }
3093 }
3094
Johan Hedberg5e130362013-09-13 08:58:17 +03003095 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3096 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3097
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003098 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003099 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003100
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003101 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003102 /* Assume BR/EDR support until proven otherwise (such as
3103		 * through reading supported features during init).
3104 */
3105 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3106 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003107
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003108 write_lock(&hci_dev_list_lock);
3109 list_add(&hdev->list, &hci_dev_list);
3110 write_unlock(&hci_dev_list_lock);
3111
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003113 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114
Johan Hedberg19202572013-01-14 22:33:51 +02003115 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003116
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003118
David Herrmann33ca9542011-10-08 14:58:49 +02003119err_wqueue:
3120 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003121 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003122err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003123 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003124
David Herrmann33ca9542011-10-08 14:58:49 +02003125 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126}
3127EXPORT_SYMBOL(hci_register_dev);
3128
3129/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003130void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131{
Sasha Levin3df92b32012-05-27 22:36:56 +02003132 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003133
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003134 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135
Johan Hovold94324962012-03-15 14:48:41 +01003136 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3137
Sasha Levin3df92b32012-05-27 22:36:56 +02003138 id = hdev->id;
3139
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003140 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003142 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143
3144 hci_dev_do_close(hdev);
3145
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303146 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003147 kfree_skb(hdev->reassembly[i]);
3148
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003149 cancel_work_sync(&hdev->power_on);
3150
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003151 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003152 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003153 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003154 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003155 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003156 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003157
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003158 /* mgmt_index_removed should take care of emptying the
3159 * pending list */
3160 BUG_ON(!list_empty(&hdev->mgmt_pending));
3161
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 hci_notify(hdev, HCI_DEV_UNREG);
3163
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003164 if (hdev->rfkill) {
3165 rfkill_unregister(hdev->rfkill);
3166 rfkill_destroy(hdev->rfkill);
3167 }
3168
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003169 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003170
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003171 debugfs_remove_recursive(hdev->debugfs);
3172
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003173 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003174 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003175
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003176 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003177 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003178 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003179 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003180 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003181 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003182 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003183
David Herrmanndc946bd2012-01-07 15:47:24 +01003184 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003185
3186 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187}
3188EXPORT_SYMBOL(hci_unregister_dev);
3189
3190/* Suspend HCI device */
3191int hci_suspend_dev(struct hci_dev *hdev)
3192{
3193 hci_notify(hdev, HCI_DEV_SUSPEND);
3194 return 0;
3195}
3196EXPORT_SYMBOL(hci_suspend_dev);
3197
3198/* Resume HCI device */
3199int hci_resume_dev(struct hci_dev *hdev)
3200{
3201 hci_notify(hdev, HCI_DEV_RESUME);
3202 return 0;
3203}
3204EXPORT_SYMBOL(hci_resume_dev);
3205
Marcel Holtmann76bca882009-11-18 00:40:39 +01003206/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003207int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003208{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003209 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003210 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003211 kfree_skb(skb);
3212 return -ENXIO;
3213 }
3214
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003215 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003216 bt_cb(skb)->incoming = 1;
3217
3218 /* Time stamp */
3219 __net_timestamp(skb);
3220
Marcel Holtmann76bca882009-11-18 00:40:39 +01003221 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003222 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003223
Marcel Holtmann76bca882009-11-18 00:40:39 +01003224 return 0;
3225}
3226EXPORT_SYMBOL(hci_recv_frame);
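/* RX path sketch (illustrative): a transport driver that has assembled
 * a complete packet hands it over roughly as follows, with buf/len
 * standing in for the raw payload:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * Note that hci_recv_frame() consumes the skb even on error.
 */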
3227
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303228static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003229 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303230{
3231 int len = 0;
3232 int hlen = 0;
3233 int remain = count;
3234 struct sk_buff *skb;
3235 struct bt_skb_cb *scb;
3236
3237 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003238 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303239 return -EILSEQ;
3240
3241 skb = hdev->reassembly[index];
3242
3243 if (!skb) {
3244 switch (type) {
3245 case HCI_ACLDATA_PKT:
3246 len = HCI_MAX_FRAME_SIZE;
3247 hlen = HCI_ACL_HDR_SIZE;
3248 break;
3249 case HCI_EVENT_PKT:
3250 len = HCI_MAX_EVENT_SIZE;
3251 hlen = HCI_EVENT_HDR_SIZE;
3252 break;
3253 case HCI_SCODATA_PKT:
3254 len = HCI_MAX_SCO_SIZE;
3255 hlen = HCI_SCO_HDR_SIZE;
3256 break;
3257 }
3258
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003259 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303260 if (!skb)
3261 return -ENOMEM;
3262
3263 scb = (void *) skb->cb;
3264 scb->expect = hlen;
3265 scb->pkt_type = type;
3266
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303267 hdev->reassembly[index] = skb;
3268 }
3269
3270 while (count) {
3271 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003272 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303273
3274 memcpy(skb_put(skb, len), data, len);
3275
3276 count -= len;
3277 data += len;
3278 scb->expect -= len;
3279 remain = count;
3280
3281 switch (type) {
3282 case HCI_EVENT_PKT:
3283 if (skb->len == HCI_EVENT_HDR_SIZE) {
3284 struct hci_event_hdr *h = hci_event_hdr(skb);
3285 scb->expect = h->plen;
3286
3287 if (skb_tailroom(skb) < scb->expect) {
3288 kfree_skb(skb);
3289 hdev->reassembly[index] = NULL;
3290 return -ENOMEM;
3291 }
3292 }
3293 break;
3294
3295 case HCI_ACLDATA_PKT:
3296 if (skb->len == HCI_ACL_HDR_SIZE) {
3297 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3298 scb->expect = __le16_to_cpu(h->dlen);
3299
3300 if (skb_tailroom(skb) < scb->expect) {
3301 kfree_skb(skb);
3302 hdev->reassembly[index] = NULL;
3303 return -ENOMEM;
3304 }
3305 }
3306 break;
3307
3308 case HCI_SCODATA_PKT:
3309 if (skb->len == HCI_SCO_HDR_SIZE) {
3310 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3311 scb->expect = h->dlen;
3312
3313 if (skb_tailroom(skb) < scb->expect) {
3314 kfree_skb(skb);
3315 hdev->reassembly[index] = NULL;
3316 return -ENOMEM;
3317 }
3318 }
3319 break;
3320 }
3321
3322 if (scb->expect == 0) {
3323 /* Complete frame */
3324
3325 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003326 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303327
3328 hdev->reassembly[index] = NULL;
3329 return remain;
3330 }
3331 }
3332
3333 return remain;
3334}
3335
Marcel Holtmannef222012007-07-11 06:42:04 +02003336int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3337{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303338 int rem = 0;
3339
Marcel Holtmannef222012007-07-11 06:42:04 +02003340 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3341 return -EILSEQ;
3342
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003343 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003344 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303345 if (rem < 0)
3346 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003347
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303348 data += (count - rem);
3349 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003350 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003351
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303352 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003353}
3354EXPORT_SYMBOL(hci_recv_fragment);
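/* Byte-stream sketch (illustrative): a UART-style driver can forward
 * arbitrary chunks; partial frames are buffered in hdev->reassembly
 * and complete ones are passed up via hci_recv_frame():
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
 *	if (err < 0)
 *		BT_ERR("fragment error %d", err);
 *
 * A negative return means -EILSEQ for an unsupported packet type or
 * -ENOMEM when a reassembly buffer could not be allocated.
 */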
3355
Suraj Sumangala99811512010-07-14 13:02:19 +05303356#define STREAM_REASSEMBLY 0
3357
3358int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3359{
3360 int type;
3361 int rem = 0;
3362
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003363 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303364 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3365
3366 if (!skb) {
3367 struct { char type; } *pkt;
3368
3369 /* Start of the frame */
3370 pkt = data;
3371 type = pkt->type;
3372
3373 data++;
3374 count--;
3375 } else
3376 type = bt_cb(skb)->pkt_type;
3377
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003378 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003379 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303380 if (rem < 0)
3381 return rem;
3382
3383 data += (count - rem);
3384 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003385 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303386
3387 return rem;
3388}
3389EXPORT_SYMBOL(hci_recv_stream_fragment);
3390
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391/* ---- Interface to upper protocols ---- */
3392
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393int hci_register_cb(struct hci_cb *cb)
3394{
3395 BT_DBG("%p name %s", cb, cb->name);
3396
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003397 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003399 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400
3401 return 0;
3402}
3403EXPORT_SYMBOL(hci_register_cb);
3404
3405int hci_unregister_cb(struct hci_cb *cb)
3406{
3407 BT_DBG("%p name %s", cb, cb->name);
3408
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003409 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003411 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412
3413 return 0;
3414}
3415EXPORT_SYMBOL(hci_unregister_cb);
3416
Marcel Holtmann51086992013-10-10 14:54:19 -07003417static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003419 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003421 /* Time stamp */
3422 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003424 /* Send copy to monitor */
3425 hci_send_to_monitor(hdev, skb);
3426
3427 if (atomic_read(&hdev->promisc)) {
3428 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003429 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430 }
3431
3432 /* Get rid of skb owner, prior to sending to the driver. */
3433 skb_orphan(skb);
3434
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003435 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003436 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437}
3438
Johan Hedberg3119ae92013-03-05 20:37:44 +02003439void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3440{
3441 skb_queue_head_init(&req->cmd_q);
3442 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003443 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003444}
3445
3446int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3447{
3448 struct hci_dev *hdev = req->hdev;
3449 struct sk_buff *skb;
3450 unsigned long flags;
3451
3452 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3453
Andre Guedes5d73e032013-03-08 11:20:16 -03003454	/* If an error occurred during request building, remove all HCI
3455 * commands queued on the HCI request queue.
3456 */
3457 if (req->err) {
3458 skb_queue_purge(&req->cmd_q);
3459 return req->err;
3460 }
3461
Johan Hedberg3119ae92013-03-05 20:37:44 +02003462 /* Do not allow empty requests */
3463 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003464 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003465
3466 skb = skb_peek_tail(&req->cmd_q);
3467 bt_cb(skb)->req.complete = complete;
3468
3469 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3470 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3471 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3472
3473 queue_work(hdev->workqueue, &hdev->cmd_work);
3474
3475 return 0;
3476}
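/* Request sketch (illustrative; my_complete is a hypothetical
 * hci_req_complete_t callback): commands are queued and submitted as
 * one unit, and errors recorded while building (req.err) surface here
 * rather than at each hci_req_add() call:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */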
3477
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003478static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003479 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480{
3481 int len = HCI_COMMAND_HDR_SIZE + plen;
3482 struct hci_command_hdr *hdr;
3483 struct sk_buff *skb;
3484
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003486 if (!skb)
3487 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
3489 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003490 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 hdr->plen = plen;
3492
3493 if (plen)
3494 memcpy(skb_put(skb, plen), param, plen);
3495
3496 BT_DBG("skb len %d", skb->len);
3497
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003498 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003499
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003500 return skb;
3501}
3502
3503/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003504int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3505 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003506{
3507 struct sk_buff *skb;
3508
3509 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3510
3511 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3512 if (!skb) {
3513 BT_ERR("%s no memory for command", hdev->name);
3514 return -ENOMEM;
3515 }
3516
Johan Hedberg11714b32013-03-05 20:37:47 +02003517	/* Stand-alone HCI commands must be flagged as
3518 * single-command requests.
3519 */
3520 bt_cb(skb)->req.start = true;
3521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003523 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524
3525 return 0;
3526}
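/* Stand-alone command sketch (illustrative): an inquiry could be
 * started outside of a request context with
 *
 *	struct hci_cp_inquiry cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(&cp.lap, lap, sizeof(cp.lap));
 *	cp.length = 0x08;
 *	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 *
 * where lap is a 3-byte inquiry access code (see the GIAC value used
 * in le_scan_disable_work_complete() above).
 */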
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527
Johan Hedberg71c76a12013-03-05 20:37:46 +02003528/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003529void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3530 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003531{
3532 struct hci_dev *hdev = req->hdev;
3533 struct sk_buff *skb;
3534
3535 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3536
Andre Guedes34739c12013-03-08 11:20:18 -03003537	/* If an error occurred during request building, there is no point in
3538 * queueing the HCI command. We can simply return.
3539 */
3540 if (req->err)
3541 return;
3542
Johan Hedberg71c76a12013-03-05 20:37:46 +02003543 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3544 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003545 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3546 hdev->name, opcode);
3547 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003548 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003549 }
3550
3551 if (skb_queue_empty(&req->cmd_q))
3552 bt_cb(skb)->req.start = true;
3553
Johan Hedberg02350a72013-04-03 21:50:29 +03003554 bt_cb(skb)->req.event = event;
3555
Johan Hedberg71c76a12013-03-05 20:37:46 +02003556 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003557}
3558
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003559void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3560 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003561{
3562 hci_req_add_ev(req, opcode, plen, param, 0);
3563}
3564
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003566void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567{
3568 struct hci_command_hdr *hdr;
3569
3570 if (!hdev->sent_cmd)
3571 return NULL;
3572
3573 hdr = (void *) hdev->sent_cmd->data;
3574
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003575 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 return NULL;
3577
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003578 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579
3580 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3581}
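/* Typical use (illustrative): command-complete handlers fetch the
 * parameters of the command that triggered the event, e.g.
 *
 *	struct hci_cp_inquiry *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *	if (!cp)
 *		return;
 *
 * A NULL return means no command is outstanding or the last sent
 * command carried a different opcode.
 */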
3582
3583/* Send ACL data */
3584static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3585{
3586 struct hci_acl_hdr *hdr;
3587 int len = skb->len;
3588
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003589 skb_push(skb, HCI_ACL_HDR_SIZE);
3590 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003591 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003592 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3593 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594}
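/* Header packing note: the 16-bit handle field combines a 12-bit
 * connection handle with the packet boundary/broadcast flags in the
 * top four bits, i.e.
 *
 *	hci_handle_pack(handle, flags) == ((handle & 0x0fff) | (flags << 12))
 *
 * which hci_handle() and hci_flags() invert on the receive side in
 * hci_acldata_packet() below.
 */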
3595
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003596static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003597 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003599 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 struct hci_dev *hdev = conn->hdev;
3601 struct sk_buff *list;
3602
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003603 skb->len = skb_headlen(skb);
3604 skb->data_len = 0;
3605
3606 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003607
3608 switch (hdev->dev_type) {
3609 case HCI_BREDR:
3610 hci_add_acl_hdr(skb, conn->handle, flags);
3611 break;
3612 case HCI_AMP:
3613 hci_add_acl_hdr(skb, chan->handle, flags);
3614 break;
3615 default:
3616 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3617 return;
3618 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003619
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003620 list = skb_shinfo(skb)->frag_list;
3621 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622		/* Non-fragmented */
3623 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3624
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003625 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626 } else {
3627 /* Fragmented */
3628 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3629
3630 skb_shinfo(skb)->frag_list = NULL;
3631
3632 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003633 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003635 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003636
3637 flags &= ~ACL_START;
3638 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 do {
3640 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003641
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003642 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003643 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644
3645 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3646
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003647 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 } while (list);
3649
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003650 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003652}
3653
3654void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3655{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003656 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003657
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003658 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003659
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003660 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003662 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664
3665/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003666void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667{
3668 struct hci_dev *hdev = conn->hdev;
3669 struct hci_sco_hdr hdr;
3670
3671 BT_DBG("%s len %d", hdev->name, skb->len);
3672
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003673 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 hdr.dlen = skb->len;
3675
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003676 skb_push(skb, HCI_SCO_HDR_SIZE);
3677 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003678 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003680 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003681
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003683 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685
3686/* ---- HCI TX task (outgoing data) ---- */
3687
3688/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003689static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3690 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691{
3692 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003693 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003694 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003696 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003698
3699 rcu_read_lock();
3700
3701 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003702 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003704
3705 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3706 continue;
3707
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 num++;
3709
3710 if (c->sent < min) {
3711 min = c->sent;
3712 conn = c;
3713 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003714
3715 if (hci_conn_num(hdev, type) == num)
3716 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 }
3718
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003719 rcu_read_unlock();
3720
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003722 int cnt, q;
3723
3724 switch (conn->type) {
3725 case ACL_LINK:
3726 cnt = hdev->acl_cnt;
3727 break;
3728 case SCO_LINK:
3729 case ESCO_LINK:
3730 cnt = hdev->sco_cnt;
3731 break;
3732 case LE_LINK:
3733 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3734 break;
3735 default:
3736 cnt = 0;
3737 BT_ERR("Unknown link type");
3738 }
3739
3740 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 *quote = q ? q : 1;
3742 } else
3743 *quote = 0;
3744
3745 BT_DBG("conn %p quote %d", conn, *quote);
3746 return conn;
3747}
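/* Worked example (illustrative): with 8 free ACL buffers (cnt == 8)
 * shared by 3 connections that have queued data (num == 3), each
 * scheduling round grants a quota of 8 / 3 == 2 packets, and the
 * connection with the smallest 'sent' count is served first.
 */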
3748
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003749static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750{
3751 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003752 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753
Ville Tervobae1f5d92011-02-10 22:38:53 -03003754 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003756 rcu_read_lock();
3757
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003759 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003760 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003761 BT_ERR("%s killing stalled connection %pMR",
3762 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003763 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 }
3765 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003766
3767 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768}
3769
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003770static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3771 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003772{
3773 struct hci_conn_hash *h = &hdev->conn_hash;
3774 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003775 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003776 struct hci_conn *conn;
3777 int cnt, q, conn_num = 0;
3778
3779 BT_DBG("%s", hdev->name);
3780
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003781 rcu_read_lock();
3782
3783 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003784 struct hci_chan *tmp;
3785
3786 if (conn->type != type)
3787 continue;
3788
3789 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3790 continue;
3791
3792 conn_num++;
3793
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003794 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003795 struct sk_buff *skb;
3796
3797 if (skb_queue_empty(&tmp->data_q))
3798 continue;
3799
3800 skb = skb_peek(&tmp->data_q);
3801 if (skb->priority < cur_prio)
3802 continue;
3803
3804 if (skb->priority > cur_prio) {
3805 num = 0;
3806 min = ~0;
3807 cur_prio = skb->priority;
3808 }
3809
3810 num++;
3811
3812 if (conn->sent < min) {
3813 min = conn->sent;
3814 chan = tmp;
3815 }
3816 }
3817
3818 if (hci_conn_num(hdev, type) == conn_num)
3819 break;
3820 }
3821
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003822 rcu_read_unlock();
3823
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003824 if (!chan)
3825 return NULL;
3826
3827 switch (chan->conn->type) {
3828 case ACL_LINK:
3829 cnt = hdev->acl_cnt;
3830 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003831 case AMP_LINK:
3832 cnt = hdev->block_cnt;
3833 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003834 case SCO_LINK:
3835 case ESCO_LINK:
3836 cnt = hdev->sco_cnt;
3837 break;
3838 case LE_LINK:
3839 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3840 break;
3841 default:
3842 cnt = 0;
3843 BT_ERR("Unknown link type");
3844 }
3845
3846 q = cnt / num;
3847 *quote = q ? q : 1;
3848 BT_DBG("chan %p quote %d", chan, *quote);
3849 return chan;
3850}
3851
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003852static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3853{
3854 struct hci_conn_hash *h = &hdev->conn_hash;
3855 struct hci_conn *conn;
3856 int num = 0;
3857
3858 BT_DBG("%s", hdev->name);
3859
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003860 rcu_read_lock();
3861
3862 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003863 struct hci_chan *chan;
3864
3865 if (conn->type != type)
3866 continue;
3867
3868 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3869 continue;
3870
3871 num++;
3872
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003873 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003874 struct sk_buff *skb;
3875
3876 if (chan->sent) {
3877 chan->sent = 0;
3878 continue;
3879 }
3880
3881 if (skb_queue_empty(&chan->data_q))
3882 continue;
3883
3884 skb = skb_peek(&chan->data_q);
3885 if (skb->priority >= HCI_PRIO_MAX - 1)
3886 continue;
3887
3888 skb->priority = HCI_PRIO_MAX - 1;
3889
3890 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003891 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003892 }
3893
3894 if (hci_conn_num(hdev, type) == num)
3895 break;
3896 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003897
3898 rcu_read_unlock();
3899
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003900}
3901
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003902static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3903{
3904 /* Calculate count of blocks used by this packet */
3905 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3906}
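/* Example (illustrative): with hdev->block_len == 256, a 1021-byte ACL
 * packet (4-byte header plus 1017 bytes of payload) occupies
 * DIV_ROUND_UP(1017, 256) == 4 data blocks.
 */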
3907
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003908static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 if (!test_bit(HCI_RAW, &hdev->flags)) {
3911		/* ACL tx timeout must be longer than the maximum
3912		 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003913 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003914 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003915 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003917}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003919static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003920{
3921 unsigned int cnt = hdev->acl_cnt;
3922 struct hci_chan *chan;
3923 struct sk_buff *skb;
3924 int quote;
3925
3926 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003927
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003928 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003929 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003930 u32 priority = (skb_peek(&chan->data_q))->priority;
3931 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003932 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003933 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003934
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003935 /* Stop if priority has changed */
3936 if (skb->priority < priority)
3937 break;
3938
3939 skb = skb_dequeue(&chan->data_q);
3940
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003941 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003942 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003943
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003944 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 hdev->acl_last_tx = jiffies;
3946
3947 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003948 chan->sent++;
3949 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 }
3951 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003952
3953 if (cnt != hdev->acl_cnt)
3954 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955}
3956
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003957static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003958{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003959 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003960 struct hci_chan *chan;
3961 struct sk_buff *skb;
3962 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003963 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003964
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003965 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003966
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003967 BT_DBG("%s", hdev->name);
3968
3969 if (hdev->dev_type == HCI_AMP)
3970 type = AMP_LINK;
3971 else
3972 type = ACL_LINK;
3973
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003974 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003975 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003976 u32 priority = (skb_peek(&chan->data_q))->priority;
3977 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3978 int blocks;
3979
3980 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003981 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003982
3983 /* Stop if priority has changed */
3984 if (skb->priority < priority)
3985 break;
3986
3987 skb = skb_dequeue(&chan->data_q);
3988
3989 blocks = __get_blocks(hdev, skb);
3990 if (blocks > hdev->block_cnt)
3991 return;
3992
3993 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003994 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003995
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003996 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003997 hdev->acl_last_tx = jiffies;
3998
3999 hdev->block_cnt -= blocks;
4000 quote -= blocks;
4001
4002 chan->sent += blocks;
4003 chan->conn->sent += blocks;
4004 }
4005 }
4006
4007 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004008 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004009}
4010
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004011static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004012{
4013 BT_DBG("%s", hdev->name);
4014
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004015 /* No ACL link over BR/EDR controller */
4016 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4017 return;
4018
4019 /* No AMP link over AMP controller */
4020 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004021 return;
4022
4023 switch (hdev->flow_ctl_mode) {
4024 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4025 hci_sched_acl_pkt(hdev);
4026 break;
4027
4028 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4029 hci_sched_acl_blk(hdev);
4030 break;
4031 }
4032}
4033
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004035static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036{
4037 struct hci_conn *conn;
4038 struct sk_buff *skb;
4039 int quote;
4040
4041 BT_DBG("%s", hdev->name);
4042
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004043 if (!hci_conn_num(hdev, SCO_LINK))
4044 return;
4045
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4047 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4048 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004049 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050
4051 conn->sent++;
4052 if (conn->sent == ~0)
4053 conn->sent = 0;
4054 }
4055 }
4056}
4057
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004058static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004059{
4060 struct hci_conn *conn;
4061 struct sk_buff *skb;
4062 int quote;
4063
4064 BT_DBG("%s", hdev->name);
4065
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004066 if (!hci_conn_num(hdev, ESCO_LINK))
4067 return;
4068
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004069 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4070 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004071 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4072 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004073 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004074
4075 conn->sent++;
4076 if (conn->sent == ~0)
4077 conn->sent = 0;
4078 }
4079 }
4080}
4081
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004082static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004083{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004084 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004085 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004086 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004087
4088 BT_DBG("%s", hdev->name);
4089
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004090 if (!hci_conn_num(hdev, LE_LINK))
4091 return;
4092
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004093 if (!test_bit(HCI_RAW, &hdev->flags)) {
4094		/* LE tx timeout must be longer than the maximum
4095		 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004096 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004097 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004098 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004099 }
4100
4101 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004102 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004103 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004104 u32 priority = (skb_peek(&chan->data_q))->priority;
4105 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004106 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004107 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004108
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004109 /* Stop if priority has changed */
4110 if (skb->priority < priority)
4111 break;
4112
4113 skb = skb_dequeue(&chan->data_q);
4114
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004115 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004116 hdev->le_last_tx = jiffies;
4117
4118 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004119 chan->sent++;
4120 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004121 }
4122 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004123
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004124 if (hdev->le_pkts)
4125 hdev->le_cnt = cnt;
4126 else
4127 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004128
4129 if (cnt != tmp)
4130 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004131}
4132
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004133static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004134{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004135 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136 struct sk_buff *skb;
4137
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004138 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004139 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
Marcel Holtmann52de5992013-09-03 18:08:38 -07004141 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4142 /* Schedule queues and send stuff to HCI driver */
4143 hci_sched_acl(hdev);
4144 hci_sched_sco(hdev);
4145 hci_sched_esco(hdev);
4146 hci_sched_le(hdev);
4147 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004148
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149 /* Send next queued raw (unknown type) packet */
4150 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004151 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152}
4153
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004154/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155
4156/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004157static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158{
4159 struct hci_acl_hdr *hdr = (void *) skb->data;
4160 struct hci_conn *conn;
4161 __u16 handle, flags;
4162
4163 skb_pull(skb, HCI_ACL_HDR_SIZE);
4164
4165 handle = __le16_to_cpu(hdr->handle);
4166 flags = hci_flags(handle);
4167 handle = hci_handle(handle);
4168
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004169 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004170 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171
4172 hdev->stat.acl_rx++;
4173
4174 hci_dev_lock(hdev);
4175 conn = hci_conn_hash_lookup_handle(hdev, handle);
4176 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004177
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004179 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004180
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004182 l2cap_recv_acldata(conn, skb, flags);
4183 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004185 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004186 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 }
4188
4189 kfree_skb(skb);
4190}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
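
/* Editor's note: unlike the ACL case, struct hci_sco_hdr is just a 16-bit
 * handle field followed by an 8-bit payload length, and this code uses the
 * handle field as-is, without the flag split performed for ACL packets.
 */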

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
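
/* Editor's sketch: req.start marks the first command queued for a request
 * built through the hci_request API, so the check above reads "the queue
 * head starts a new request" as "the current request is fully sent".
 * Roughly how such a request is assembled elsewhere in the stack
 * (my_complete_cb is a hypothetical callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_run(&req, my_complete_cb);
 */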

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
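
/* Editor's note: this function is driven from the event path; the
 * Command Complete and Command Status handlers in hci_event.c call
 * hci_req_cmd_complete() with the opcode and status they decoded, which
 * is what ultimately fires a request's completion callback above.
 */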

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
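
/* Editor's sketch: transport drivers hand incoming frames to this worker
 * via hci_recv_frame(), which queues the skb on hdev->rx_q and schedules
 * rx_work; the driver tags the packet type first, e.g.:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */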

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
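
/* Editor's sketch: the counterpart that feeds this worker is
 * hci_send_cmd(), which builds the command skb, queues it on hdev->cmd_q
 * and kicks cmd_work; cmd_cnt is replenished from the event path when the
 * controller signals Command Complete or Command Status:
 *
 *	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 */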