blob: d370b432aea642f517aa631227ce10b8cd070bcc [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070032#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
/* Deferred work handlers for RX, command and TX queue processing;
 * defined later in this file, declared here for early references.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering: allocates the hciN index for each registered device */
static DEFINE_IDA(hci_index_ida);
51
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer so listeners are
 * informed of device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
/* ---- HCI debugfs entries ---- */

/* Report the Device Under Test mode flag as "Y\n" or "N\n". */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* Parse a boolean from userspace and toggle DUT mode by sending the
 * matching HCI command synchronously.  Disabling is done via a plain
 * controller reset since there is no explicit disable command.
 * Returns bytes consumed on success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* The controller must be powered up to accept commands */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do when the requested state is already active */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
125
/* Dump every supported features page (bounded by the controller's
 * max_page) and, on LE capable controllers, the LE feature bits.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
163
/* List every blacklisted device address together with its address type. */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
188
/* Print each registered service UUID in standard %pUb text form. */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
223
/* Dump the discovery (inquiry) cache: one line per remembered remote
 * device with its page-scan parameters, class, clock offset, RSSI,
 * SSP mode and the jiffies timestamp of the entry.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
259
/* List stored BR/EDR link keys: address, key type, key value and PIN
 * length.  Uses the _safe iterator although entries are not removed
 * here; the dev lock protects the list during the walk.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
287
/* Read-only view of the HCI_DEBUG_KEYS flag as "Y\n" or "N\n". */
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open = simple_open,
	.read = use_debug_keys_read,
	.llseek = default_llseek,
};
305
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700306static int dev_class_show(struct seq_file *f, void *ptr)
307{
308 struct hci_dev *hdev = f->private;
309
310 hci_dev_lock(hdev);
311 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 hdev->dev_class[1], hdev->dev_class[0]);
313 hci_dev_unlock(hdev);
314
315 return 0;
316}
317
318static int dev_class_open(struct inode *inode, struct file *file)
319{
320 return single_open(file, dev_class_show, inode->i_private);
321}
322
323static const struct file_operations dev_class_fops = {
324 .open = dev_class_open,
325 .read = seq_read,
326 .llseek = seq_lseek,
327 .release = single_release,
328};
329
/* Read-only debugfs attribute returning the controller voice setting,
 * formatted as a four digit hex value.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
343
/* Set the auto-accept delay; no range validation is applied here. */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current auto-accept delay. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
368
/* Enable/disable SSP debug mode (val must be 0 or 1) by issuing
 * HCI_OP_WRITE_SSP_DEBUG_MODE synchronously; the cached value in
 * hdev->ssp_debug_mode is only updated after the controller accepted
 * the command.  Returns 0 on success or a negative errno.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* The controller must be powered up to accept commands */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the cached SSP debug mode value. */
static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
417
/* Report the HCI_FORCE_SC flag as "Y\n" or "N\n". */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* Toggle forced Secure Connections support.  Unlike dut_mode this may
 * only be changed while the device is DOWN, since the flag influences
 * controller initialization.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do when the requested state is already set */
	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};
463
/* Read-only view of the HCI_SC_ONLY flag as "Y\n" or "N\n". */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};
481
/* Set the connection idle timeout in milliseconds.  0 disables it;
 * otherwise the value must lie in [500 ms, 3600000 ms (1 h)].
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current idle timeout. */
static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
509
/* Set the minimum sniff interval.  Must be non-zero, even (intervals
 * are in units of 0.625 ms slot pairs) and not above the current max.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current minimum sniff interval. */
static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
537
/* Set the maximum sniff interval.  Must be non-zero, even and not
 * below the current minimum (mirror of sniff_min_interval_set).
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current maximum sniff interval. */
static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
565
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700566static int static_address_show(struct seq_file *f, void *p)
567{
568 struct hci_dev *hdev = f->private;
569
570 hci_dev_lock(hdev);
571 seq_printf(f, "%pMR\n", &hdev->static_addr);
572 hci_dev_unlock(hdev);
573
574 return 0;
575}
576
577static int static_address_open(struct inode *inode, struct file *file)
578{
579 return single_open(file, static_address_show, inode->i_private);
580}
581
582static const struct file_operations static_address_fops = {
583 .open = static_address_open,
584 .read = seq_read,
585 .llseek = seq_lseek,
586 .release = single_release,
587};
588
/* Set the LE own address type; only 0 (public) and 1 (random) are
 * accepted.
 */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current LE own address type. */
static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
616
/* List stored LE long term keys: address and type, authenticated flag,
 * key type, encryption size, EDiv, 8-byte Rand and the 16-byte key.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
646
/* Set the minimum LE connection interval.  Range per the Bluetooth
 * spec is 0x0006-0x0c80, and it may not exceed the current maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current minimum LE connection interval. */
static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
674
/* Set the maximum LE connection interval.  Range per the Bluetooth
 * spec is 0x0006-0x0c80, and it may not drop below the current minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Report the current maximum LE connection interval. */
static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
702
/* Report the 6LoWPAN-over-BLE enable flag as "Y\n" or "N\n". */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* Parse a boolean from userspace and toggle the 6LoWPAN enable flag.
 * No HCI traffic is generated; this only flips the dev_flags bit.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	/* Nothing to do when the requested state is already set */
	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};
745
/* ---- HCI requests ---- */

/* Completion callback for synchronous requests: record the result and
 * wake up the thread sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
758
/* Abort a pending synchronous request with the given error and wake
 * the waiting thread; it will see HCI_REQ_CANCELED as the status.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
769
/* Take ownership of the last received event (hdev->recv_evt) and
 * validate it against the expected opcode/event.  If @event is non-zero
 * the raw event must match it; otherwise a Command Complete event for
 * @opcode is required.  On success the skb (with headers pulled) is
 * returned to the caller, who must free it; on any mismatch or short
 * data the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stashed event so nobody else can free it under us */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
824
/* Send a single HCI command and sleep until it completes, is canceled,
 * or @timeout expires.  If @event is non-zero the command is expected
 * to report completion via that event instead of Command Complete.
 * Returns the completion skb (caller frees) or an ERR_PTR.  Must be
 * called from process context; the caller holds the request lock.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Interrupted by a signal before the request finished */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Status still pending: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
878
/* Convenience wrapper around __hci_cmd_sync_ev() for commands that
 * complete with the regular Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
885
/* Execute request and wait for completion. */

/* Build a request via @func, run it, and sleep until it completes, is
 * canceled, a signal arrives or @timeout expires.  Caller must already
 * hold the request lock (see hci_req_sync for the locked variant).
 * Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Interrupted by a signal before the request finished */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Status still pending: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
949
/* Locked wrapper around __hci_req_sync(): requires the device to be
 * up and serializes against other synchronous requests.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
967
/* Request builder that queues a controller reset and marks the device
 * as being in reset via the HCI_RESET flag.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
976
/* Queue the stage-one init commands for a BR/EDR controller and select
 * packet-based flow control.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
990
/* Queue the stage-one init commands for an AMP controller and select
 * block-based flow control as required for AMP data paths.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1016
Johan Hedberg42c6b122013-03-05 20:37:49 +02001017static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001018{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001019 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001020
1021 BT_DBG("%s %ld", hdev->name, opt);
1022
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001023 /* Reset */
1024 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001025 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001026
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001027 switch (hdev->dev_type) {
1028 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001029 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001030 break;
1031
1032 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001033 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001034 break;
1035
1036 default:
1037 BT_ERR("Unknown device type %d", hdev->dev_type);
1038 break;
1039 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001040}
1041
/* Stage-2 setup for BR/EDR capable controllers: read basic device
 * information, clear event filters and set the connection accept
 * timeout. Page scan parameters are only read on controllers known to
 * support the commands.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1083
/* Stage-2 setup for LE capable controllers: read LE buffer sizes,
 * features, TX power, white list size and supported states. On LE-only
 * controllers LE support cannot be toggled, so mark it enabled here.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1107
1108static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1109{
1110 if (lmp_ext_inq_capable(hdev))
1111 return 0x02;
1112
1113 if (lmp_inq_rssi_capable(hdev))
1114 return 0x01;
1115
1116 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1117 hdev->lmp_subver == 0x0757)
1118 return 0x01;
1119
1120 if (hdev->manufacturer == 15) {
1121 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1122 return 0x01;
1123 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1124 return 0x01;
1125 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1126 return 0x01;
1127 }
1128
1129 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1130 hdev->lmp_subver == 0x1805)
1131 return 0x01;
1132
1133 return 0x00;
1134}
1135
Johan Hedberg42c6b122013-03-05 20:37:49 +02001136static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001137{
1138 u8 mode;
1139
Johan Hedberg42c6b122013-03-05 20:37:49 +02001140 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001141
Johan Hedberg42c6b122013-03-05 20:37:49 +02001142 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001143}
1144
/* Build and queue the HCI event mask (and the LE event mask when LE is
 * supported), enabling only the events matching the controller's
 * advertised capabilities. The individual bit assignments below follow
 * the HCI Set_Event_Mask command layout.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1225
/* Second controller initialization stage: transport-specific setup
 * (BR/EDR and/or LE), event mask programming, and capability-dependent
 * commands such as SSP mode, inquiry mode and extended features.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data on the
			 * controller by writing an all-zero EIR record.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1289
Johan Hedberg42c6b122013-03-05 20:37:49 +02001290static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001291{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001292 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001293 struct hci_cp_write_def_link_policy cp;
1294 u16 link_policy = 0;
1295
1296 if (lmp_rswitch_capable(hdev))
1297 link_policy |= HCI_LP_RSWITCH;
1298 if (lmp_hold_capable(hdev))
1299 link_policy |= HCI_LP_HOLD;
1300 if (lmp_sniff_capable(hdev))
1301 link_policy |= HCI_LP_SNIFF;
1302 if (lmp_park_capable(hdev))
1303 link_policy |= HCI_LP_PARK;
1304
1305 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001307}
1308
Johan Hedberg42c6b122013-03-05 20:37:49 +02001309static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001310{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001311 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001312 struct hci_cp_write_le_host_supported cp;
1313
Johan Hedbergc73eee92013-04-19 18:35:21 +03001314 /* LE-only devices do not support explicit enablement */
1315 if (!lmp_bredr_capable(hdev))
1316 return;
1317
Johan Hedberg2177bab2013-03-05 20:37:43 +02001318 memset(&cp, 0, sizeof(cp));
1319
1320 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1321 cp.le = 0x01;
1322 cp.simul = lmp_le_br_capable(hdev);
1323 }
1324
1325 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001326 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1327 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001328}
1329
/* Build and queue page 2 of the HCI event mask, enabling the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events that match the controller's capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1361
/* Third controller initialization stage: delete stored link keys,
 * default link policy, LE address type selection plus LE host support,
 * and extended feature pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Only send Write_Default_Link_Policy if the command is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1418
/* Fourth controller initialization stage: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1440
/* Run the staged controller initialization.
 *
 * Stage 1 runs for every controller type; AMP controllers stop there.
 * BR/EDR (including single-mode LE and dual-mode) controllers also run
 * stages 2-4. During the initial HCI_SETUP phase the per-controller
 * debugfs entries are created, grouped by the capabilities the
 * controller advertises.
 *
 * Returns 0 on success or the negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries available on all controllers */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1544
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546{
1547 __u8 scan = opt;
1548
Johan Hedberg42c6b122013-03-05 20:37:49 +02001549 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
1551 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553}
1554
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556{
1557 __u8 auth = opt;
1558
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
1561 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563}
1564
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566{
1567 __u8 encrypt = opt;
1568
Johan Hedberg42c6b122013-03-05 20:37:49 +02001569 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001571 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001572 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573}
1574
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001576{
1577 __le16 policy = cpu_to_le16(opt);
1578
Johan Hedberg42c6b122013-03-05 20:37:49 +02001579 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001580
1581 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001582 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001583}
1584
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001585/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 * Device is held on return. */
1587struct hci_dev *hci_dev_get(int index)
1588{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001589 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
1591 BT_DBG("%d", index);
1592
1593 if (index < 0)
1594 return NULL;
1595
1596 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001597 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 if (d->id == index) {
1599 hdev = hci_dev_hold(d);
1600 break;
1601 }
1602 }
1603 read_unlock(&hci_dev_list_lock);
1604 return hdev;
1605}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606
1607/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001608
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001609bool hci_discovery_active(struct hci_dev *hdev)
1610{
1611 struct discovery_state *discov = &hdev->discovery;
1612
Andre Guedes6fbe1952012-02-03 17:47:58 -03001613 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001614 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001615 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001616 return true;
1617
Andre Guedes6fbe1952012-02-03 17:47:58 -03001618 default:
1619 return false;
1620 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001621}
1622
/* Advance the discovery state machine and emit mgmt Discovering
 * notifications on the transitions user space needs to see.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* Nothing to do if the state does not change */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Don't notify user space when stopping a discovery that
		 * never got past the STARTING state.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1648
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001649void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650{
Johan Hedberg30883512012-01-04 14:16:21 +02001651 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001652 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Johan Hedberg561aafb2012-01-04 13:31:59 +02001654 list_for_each_entry_safe(p, n, &cache->all, all) {
1655 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001656 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001658
1659 INIT_LIST_HEAD(&cache->unknown);
1660 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661}
1662
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001663struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1664 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
Johan Hedberg30883512012-01-04 14:16:21 +02001666 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 struct inquiry_entry *e;
1668
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001669 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Johan Hedberg561aafb2012-01-04 13:31:59 +02001671 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001673 return e;
1674 }
1675
1676 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677}
1678
Johan Hedberg561aafb2012-01-04 13:31:59 +02001679struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001680 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001681{
Johan Hedberg30883512012-01-04 14:16:21 +02001682 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001686
1687 list_for_each_entry(e, &cache->unknown, list) {
1688 if (!bacmp(&e->data.bdaddr, bdaddr))
1689 return e;
1690 }
1691
1692 return NULL;
1693}
1694
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001695struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001696 bdaddr_t *bdaddr,
1697 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001698{
1699 struct discovery_state *cache = &hdev->discovery;
1700 struct inquiry_entry *e;
1701
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001702 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001703
1704 list_for_each_entry(e, &cache->resolve, list) {
1705 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1706 return e;
1707 if (!bacmp(&e->data.bdaddr, bdaddr))
1708 return e;
1709 }
1710
1711 return NULL;
1712}
1713
/* Re-insert @ie at its correct position in the resolve list.
 *
 * The list is kept ordered by RSSI magnitude (smallest |rssi| first);
 * entries whose name resolution is already pending are skipped over
 * rather than displaced, so @ie is placed after any NAME_PENDING
 * entries that precede its RSSI slot.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the walk below finds the new predecessor */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop at the first non-pending entry with an RSSI
		 * magnitude greater than or equal to @ie's.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert directly after the last entry that must stay ahead */
	list_add(&ie->list, pos);
}
1732
/* Add or refresh an inquiry result in the discovery cache.
 *
 * @data:       inquiry response data for one remote device
 * @name_known: whether the caller already knows the remote name
 * @ssp:        optional out-parameter; set to true when either this
 *              result or the cached entry reports SSP support
 *
 * Returns true when the entry needs no further name resolution (its
 * name state ends up known or pending); returns false when the name is
 * still unknown or the entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* The device showed up again, so any stored OOB data is stale */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support is sticky: keep it if the cache saw it */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name lookup is still needed:
		 * re-sort the entry so the resolve ordering stays valid.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		/* Track it on the unknown list for later name lookup */
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing unknown entry once the name is learned;
	 * pending lookups are left to complete on their own.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1790
1791static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1792{
Johan Hedberg30883512012-01-04 14:16:21 +02001793 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 struct inquiry_info *info = (struct inquiry_info *) buf;
1795 struct inquiry_entry *e;
1796 int copied = 0;
1797
Johan Hedberg561aafb2012-01-04 13:31:59 +02001798 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001800
1801 if (copied >= num)
1802 break;
1803
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 bacpy(&info->bdaddr, &data->bdaddr);
1805 info->pscan_rep_mode = data->pscan_rep_mode;
1806 info->pscan_period_mode = data->pscan_period_mode;
1807 info->pscan_mode = data->pscan_mode;
1808 memcpy(info->dev_class, data->dev_class, 3);
1809 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001810
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001812 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 }
1814
1815 BT_DBG("cache %p, copied %d", cache, copied);
1816 return copied;
1817}
1818
Johan Hedberg42c6b122013-03-05 20:37:49 +02001819static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820{
1821 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001822 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 struct hci_cp_inquiry cp;
1824
1825 BT_DBG("%s", hdev->name);
1826
1827 if (test_bit(HCI_INQUIRY, &hdev->flags))
1828 return;
1829
1830 /* Start Inquiry */
1831 memcpy(&cp.lap, &ir->lap, 3);
1832 cp.length = ir->length;
1833 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001834 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835}
1836
/* Bit-wait action for wait_on_bit(): sleep until woken, then report
 * whether a signal is pending (non-zero aborts the wait with an error).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843int hci_inquiry(void __user *arg)
1844{
1845 __u8 __user *ptr = arg;
1846 struct hci_inquiry_req ir;
1847 struct hci_dev *hdev;
1848 int err = 0, do_inquiry = 0, max_rsp;
1849 long timeo;
1850 __u8 *buf;
1851
1852 if (copy_from_user(&ir, ptr, sizeof(ir)))
1853 return -EFAULT;
1854
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001855 hdev = hci_dev_get(ir.dev_id);
1856 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 return -ENODEV;
1858
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001859 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1860 err = -EBUSY;
1861 goto done;
1862 }
1863
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001864 if (hdev->dev_type != HCI_BREDR) {
1865 err = -EOPNOTSUPP;
1866 goto done;
1867 }
1868
Johan Hedberg56f87902013-10-02 13:43:13 +03001869 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1870 err = -EOPNOTSUPP;
1871 goto done;
1872 }
1873
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001874 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001875 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001876 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001877 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 do_inquiry = 1;
1879 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001880 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
Marcel Holtmann04837f62006-07-03 10:02:33 +02001882 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001883
1884 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001885 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1886 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001887 if (err < 0)
1888 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001889
1890 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1891 * cleared). If it is interrupted by a signal, return -EINTR.
1892 */
1893 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1894 TASK_INTERRUPTIBLE))
1895 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001896 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001898 /* for unlimited number of responses we will use buffer with
1899 * 255 entries
1900 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1902
1903 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1904 * copy it to the user space.
1905 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001906 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001907 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 err = -ENOMEM;
1909 goto done;
1910 }
1911
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001912 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001914 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916 BT_DBG("num_rsp %d", ir.num_rsp);
1917
1918 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1919 ptr += sizeof(ir);
1920 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001921 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001923 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 err = -EFAULT;
1925
1926 kfree(buf);
1927
1928done:
1929 hci_dev_put(hdev);
1930 return err;
1931}
1932
/* Power on a controller: run the driver open/setup callbacks and the
 * HCI init sequence, then mark the device up and (outside of the setup
 * phase) report the power state over the management interface.
 *
 * Returns 0 on success or a negative errno; on init failure all queued
 * work is flushed and the transport is closed again.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport via the driver callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only announce power state for regular BR/EDR devices
		 * that are past setup and not claimed by a user channel.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2034
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002035/* ---- HCI ioctl helpers ---- */
2036
2037int hci_dev_open(__u16 dev)
2038{
2039 struct hci_dev *hdev;
2040 int err;
2041
2042 hdev = hci_dev_get(dev);
2043 if (!hdev)
2044 return -ENODEV;
2045
Johan Hedberge1d08f42013-10-01 22:44:50 +03002046 /* We need to ensure that no other power on/off work is pending
2047 * before proceeding to call hci_dev_do_open. This is
2048 * particularly important if the setup procedure has not yet
2049 * completed.
2050 */
2051 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2052 cancel_delayed_work(&hdev->power_off);
2053
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002054 /* After this call it is guaranteed that the setup procedure
2055 * has finished. This means that error conditions like RFKILL
2056 * or no valid public or static random address apply.
2057 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002058 flush_workqueue(hdev->req_workqueue);
2059
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002060 err = hci_dev_do_open(hdev);
2061
2062 hci_dev_put(hdev);
2063
2064 return err;
2065}
2066
/* Power off a controller: cancel pending work, flush all queues and
 * caches, optionally reset the hardware, close the transport and
 * report the power state. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer and bail */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout is cancelled and its flags
	 * cleared, since they no longer apply once powered off.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb handles NULL, so no check is needed here */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Auto-off transitions skip the mgmt notification */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2164
2165int hci_dev_close(__u16 dev)
2166{
2167 struct hci_dev *hdev;
2168 int err;
2169
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002170 hdev = hci_dev_get(dev);
2171 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002173
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002174 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2175 err = -EBUSY;
2176 goto done;
2177 }
2178
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002179 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2180 cancel_delayed_work(&hdev->power_off);
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002183
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002184done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 hci_dev_put(hdev);
2186 return err;
2187}
2188
/* HCIDEVRESET ioctl: drop all pending traffic and connections, then
 * (for non-raw devices) issue an HCI Reset to the controller.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN when
 * the device is not up, or -EBUSY when claimed by a user channel.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Discard discovery results and active connections */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and clear all flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2233
2234int hci_dev_reset_stat(__u16 dev)
2235{
2236 struct hci_dev *hdev;
2237 int ret = 0;
2238
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002239 hdev = hci_dev_get(dev);
2240 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 return -ENODEV;
2242
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002243 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2244 ret = -EBUSY;
2245 goto done;
2246 }
2247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2249
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002250done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 return ret;
2253}
2254
2255int hci_dev_cmd(unsigned int cmd, void __user *arg)
2256{
2257 struct hci_dev *hdev;
2258 struct hci_dev_req dr;
2259 int err = 0;
2260
2261 if (copy_from_user(&dr, arg, sizeof(dr)))
2262 return -EFAULT;
2263
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002264 hdev = hci_dev_get(dr.dev_id);
2265 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 return -ENODEV;
2267
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002268 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2269 err = -EBUSY;
2270 goto done;
2271 }
2272
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002273 if (hdev->dev_type != HCI_BREDR) {
2274 err = -EOPNOTSUPP;
2275 goto done;
2276 }
2277
Johan Hedberg56f87902013-10-02 13:43:13 +03002278 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2279 err = -EOPNOTSUPP;
2280 goto done;
2281 }
2282
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 switch (cmd) {
2284 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002285 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2286 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 break;
2288
2289 case HCISETENCRYPT:
2290 if (!lmp_encrypt_capable(hdev)) {
2291 err = -EOPNOTSUPP;
2292 break;
2293 }
2294
2295 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2296 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002297 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2298 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 if (err)
2300 break;
2301 }
2302
Johan Hedberg01178cd2013-03-05 20:37:41 +02002303 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2304 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 break;
2306
2307 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002308 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2309 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 break;
2311
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002312 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002313 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2314 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002315 break;
2316
2317 case HCISETLINKMODE:
2318 hdev->link_mode = ((__u16) dr.dev_opt) &
2319 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2320 break;
2321
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 case HCISETPTYPE:
2323 hdev->pkt_type = (__u16) dr.dev_opt;
2324 break;
2325
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002327 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2328 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 break;
2330
2331 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002332 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2333 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 break;
2335
2336 default:
2337 err = -EINVAL;
2338 break;
2339 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002340
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002341done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 hci_dev_put(hdev);
2343 return err;
2344}
2345
2346int hci_get_dev_list(void __user *arg)
2347{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002348 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 struct hci_dev_list_req *dl;
2350 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 int n = 0, size, err;
2352 __u16 dev_num;
2353
2354 if (get_user(dev_num, (__u16 __user *) arg))
2355 return -EFAULT;
2356
2357 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2358 return -EINVAL;
2359
2360 size = sizeof(*dl) + dev_num * sizeof(*dr);
2361
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002362 dl = kzalloc(size, GFP_KERNEL);
2363 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 return -ENOMEM;
2365
2366 dr = dl->dev_req;
2367
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002368 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002369 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002370 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002371 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002372
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002373 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2374 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002375
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 (dr + n)->dev_id = hdev->id;
2377 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002378
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 if (++n >= dev_num)
2380 break;
2381 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002382 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
2384 dl->dev_num = n;
2385 size = sizeof(*dl) + n * sizeof(*dr);
2386
2387 err = copy_to_user(arg, dl, size);
2388 kfree(dl);
2389
2390 return err ? -EFAULT : 0;
2391}
2392
/* HCIGETDEVINFO ioctl: fill a hci_dev_info with a single controller's
 * properties and copy it back to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) interface implies pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble and device type above it */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffers via ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2441
2442/* ---- Interface to HCI drivers ---- */
2443
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002444static int hci_rfkill_set_block(void *data, bool blocked)
2445{
2446 struct hci_dev *hdev = data;
2447
2448 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2449
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002450 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2451 return -EBUSY;
2452
Johan Hedberg5e130362013-09-13 08:58:17 +03002453 if (blocked) {
2454 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002455 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2456 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002457 } else {
2458 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002459 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002460
2461 return 0;
2462}
2463
/* rfkill operations for HCI controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2467
/* Deferred power-on handler (hdev->power_on work).
 *
 * Opens the device and then either keeps it powered, schedules the
 * automatic power-off, or shuts it straight back down when an error
 * condition that was deliberately ignored during setup still holds.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Let mgmt listeners know the power-on attempt failed */
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* rfkilled, or a BR/EDR device with no public and no
		 * static address: power back off.
		 */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-on ends the setup phase and announces
	 * the controller to the management interface.
	 */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2499
/* Delayed power-off handler (hdev->power_off work): simply closes
 * the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2509
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002510static void hci_discov_off(struct work_struct *work)
2511{
2512 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002513
2514 hdev = container_of(work, struct hci_dev, discov_off.work);
2515
2516 BT_DBG("%s", hdev->name);
2517
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002518 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002519}
2520
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002521int hci_uuids_clear(struct hci_dev *hdev)
2522{
Johan Hedberg48210022013-01-27 00:31:28 +02002523 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002524
Johan Hedberg48210022013-01-27 00:31:28 +02002525 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2526 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002527 kfree(uuid);
2528 }
2529
2530 return 0;
2531}
2532
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002533int hci_link_keys_clear(struct hci_dev *hdev)
2534{
2535 struct list_head *p, *n;
2536
2537 list_for_each_safe(p, n, &hdev->link_keys) {
2538 struct link_key *key;
2539
2540 key = list_entry(p, struct link_key, list);
2541
2542 list_del(p);
2543 kfree(key);
2544 }
2545
2546 return 0;
2547}
2548
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002549int hci_smp_ltks_clear(struct hci_dev *hdev)
2550{
2551 struct smp_ltk *k, *tmp;
2552
2553 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2554 list_del(&k->list);
2555 kfree(k);
2556 }
2557
2558 return 0;
2559}
2560
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002561struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2562{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002563 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002564
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002565 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002566 if (bacmp(bdaddr, &k->bdaddr) == 0)
2567 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002568
2569 return NULL;
2570}
2571
/* Decide whether a newly created link key should be stored
 * persistently. The checks are order-sensitive: each rule only
 * applies when all earlier ones fell through. conn may be NULL
 * (key delivered without a connection, i.e. security mode 3).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2607
Johan Hedberg98a0b842014-01-30 19:40:00 -08002608static bool ltk_type_master(u8 type)
2609{
2610 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2611 return true;
2612
2613 return false;
2614}
2615
2616struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2617 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002618{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002619 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002620
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002621 list_for_each_entry(k, &hdev->long_term_keys, list) {
2622 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002623 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002624 continue;
2625
Johan Hedberg98a0b842014-01-30 19:40:00 -08002626 if (ltk_type_master(k->type) != master)
2627 continue;
2628
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002629 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002630 }
2631
2632 return NULL;
2633}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002634
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002635struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002636 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002637{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002638 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002639
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002640 list_for_each_entry(k, &hdev->long_term_keys, list)
2641 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002642 bacmp(bdaddr, &k->bdaddr) == 0 &&
2643 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002644 return k;
2645
2646 return NULL;
2647}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002648
/* Store (or update) a BR/EDR link key for bdaddr.
 *
 * conn may be NULL. When new_key is set, the management interface is
 * notified and, for a non-persistent key, the connection is marked so
 * the key is flushed when the connection ends.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address or allocate one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2701
/* Store (or update) an SMP long term key for bdaddr/addr_type.
 *
 * The existing entry matching address, address type and role (derived
 * from the key type) is reused when present. When new_key is set and
 * the key is a real LTK (not an STK), the management interface is
 * notified.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived and not announced over mgmt */
	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
2736
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002737int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2738{
2739 struct link_key *key;
2740
2741 key = hci_find_link_key(hdev, bdaddr);
2742 if (!key)
2743 return -ENOENT;
2744
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002745 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002746
2747 list_del(&key->list);
2748 kfree(key);
2749
2750 return 0;
2751}
2752
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002753int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2754{
2755 struct smp_ltk *k, *tmp;
2756
2757 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2758 if (bacmp(bdaddr, &k->bdaddr))
2759 continue;
2760
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002761 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002762
2763 list_del(&k->list);
2764 kfree(k);
2765 }
2766
2767 return 0;
2768}
2769
Ville Tervo6bd32322011-02-16 16:32:41 +02002770/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002771static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002772{
2773 struct hci_dev *hdev = (void *) arg;
2774
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002775 if (hdev->sent_cmd) {
2776 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2777 u16 opcode = __le16_to_cpu(sent->opcode);
2778
2779 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2780 } else {
2781 BT_ERR("%s command tx timeout", hdev->name);
2782 }
2783
Ville Tervo6bd32322011-02-16 16:32:41 +02002784 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002785 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002786}
2787
Szymon Janc2763eda2011-03-22 13:12:22 +01002788struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002789 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002790{
2791 struct oob_data *data;
2792
2793 list_for_each_entry(data, &hdev->remote_oob_data, list)
2794 if (bacmp(bdaddr, &data->bdaddr) == 0)
2795 return data;
2796
2797 return NULL;
2798}
2799
2800int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2801{
2802 struct oob_data *data;
2803
2804 data = hci_find_remote_oob_data(hdev, bdaddr);
2805 if (!data)
2806 return -ENOENT;
2807
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002808 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002809
2810 list_del(&data->list);
2811 kfree(data);
2812
2813 return 0;
2814}
2815
2816int hci_remote_oob_data_clear(struct hci_dev *hdev)
2817{
2818 struct oob_data *data, *n;
2819
2820 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2821 list_del(&data->list);
2822 kfree(data);
2823 }
2824
2825 return 0;
2826}
2827
/* Store remote OOB data containing only the 192-bit hash and
 * randomizer for bdaddr, reusing any existing entry. The 256-bit
 * values are zeroed since none were supplied.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* Invalidate any previously stored 256-bit values */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2853
/* Store extended remote OOB data (both the 192-bit and 256-bit hash
 * and randomizer values) for bdaddr, reusing any existing entry.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2880
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002881struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2882 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002883{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002884 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002885
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002886 list_for_each_entry(b, &hdev->blacklist, list) {
2887 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002888 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002889 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002890
2891 return NULL;
2892}
2893
2894int hci_blacklist_clear(struct hci_dev *hdev)
2895{
2896 struct list_head *p, *n;
2897
2898 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002899 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002900
2901 list_del(p);
2902 kfree(b);
2903 }
2904
2905 return 0;
2906}
2907
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002908int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002909{
2910 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002911
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002912 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002913 return -EBADF;
2914
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002915 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002916 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002917
2918 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002919 if (!entry)
2920 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002921
2922 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002923 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002924
2925 list_add(&entry->list, &hdev->blacklist);
2926
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002927 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002928}
2929
/* Remove bdaddr/type from the blacklist and notify the management
 * core. BDADDR_ANY clears the whole list instead.
 *
 * Returns the mgmt_device_unblocked() result on success or -ENOENT
 * when the entry does not exist.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2946
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002947static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002948{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002949 if (status) {
2950 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002951
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002952 hci_dev_lock(hdev);
2953 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2954 hci_dev_unlock(hdev);
2955 return;
2956 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002957}
2958
/* Completion callback for the LE-scan-disable request. Depending on
 * the discovery type, either finish discovery (LE-only) or start the
 * BR/EDR inquiry phase of an interleaved discovery.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* NOTE: other discovery types intentionally fall through with
	 * no action here.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the inquiry with a clean cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3001
/* Delayed work (hdev->le_scan_disable) that sends the command to
 * turn LE scanning off; the follow-up logic runs in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3022
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev with default parameters,
 * empty storage lists, work items and the command timer. Returns
 * NULL on allocation failure. Counterpart of hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default controller parameters */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan and connection parameters */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3082
/* Free HCI device */
/* Drop the final device reference; the actual memory is released by
 * the device release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3090
/* Register HCI device */
/* Register a device previously set up with hci_alloc_dev(): assign
 * an index, create the work queues and sysfs/debugfs entries, hook
 * up rfkill and queue the initial power-on.
 *
 * Returns the assigned device id (>= 0) on success or a negative
 * errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered, memory-reclaim-safe work queues for RX/TX/command
	 * processing and for serialized request work.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort; the device works
	 * without it.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Kick off the deferred power-on (hci_power_on) */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3188
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so other paths can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index: hdev may be freed by hci_dev_put() below,
	 * but the ida entry must only be released at the very end.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Notify mgmt of the removal, unless the device was still in
	 * setup/init and mgmt never announced it.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all stored remote-device data */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_dev_hold() at registration */
	hci_dev_put(hdev);

	/* Only now is the index safe to reuse */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3249
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Broadcast HCI_DEV_SUSPEND via hci_notify(); no other device
	 * state is touched here.  Always succeeds.
	 */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3257
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Broadcast HCI_DEV_RESUME via hci_notify(); no other device
	 * state is touched here.  Always succeeds.
	 */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3265
Marcel Holtmann76bca882009-11-18 00:40:39 +01003266/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003267int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003268{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003269 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003270 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003271 kfree_skb(skb);
3272 return -ENXIO;
3273 }
3274
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003275 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003276 bt_cb(skb)->incoming = 1;
3277
3278 /* Time stamp */
3279 __net_timestamp(skb);
3280
Marcel Holtmann76bca882009-11-18 00:40:39 +01003281 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003282 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003283
Marcel Holtmann76bca882009-11-18 00:40:39 +01003284 return 0;
3285}
3286EXPORT_SYMBOL(hci_recv_frame);
3287
/* Incrementally reassemble one HCI packet of @type from a driver-supplied
 * byte stream.  A partial packet is parked in hdev->reassembly[index]
 * between calls.  Returns the number of input bytes NOT consumed (>= 0)
 * or a negative error; a completed packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial packet pending: allocate one sized for the
		 * worst case of this packet type.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Expect the packet header first; its length field tells
		 * us how much payload follows.
		 */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has arrived, switch to expecting
		 * the payload length it advertises; drop the packet if it
		 * would not fit in the buffer allocated above.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3395
Marcel Holtmannef222012007-07-11 06:42:04 +02003396int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3397{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303398 int rem = 0;
3399
Marcel Holtmannef222012007-07-11 06:42:04 +02003400 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3401 return -EILSEQ;
3402
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003403 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003404 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303405 if (rem < 0)
3406 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003407
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303408 data += (count - rem);
3409 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003410 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003411
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303412 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003413}
3414EXPORT_SYMBOL(hci_recv_fragment);
3415
Suraj Sumangala99811512010-07-14 13:02:19 +05303416#define STREAM_REASSEMBLY 0
3417
3418int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3419{
3420 int type;
3421 int rem = 0;
3422
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003423 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303424 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3425
3426 if (!skb) {
3427 struct { char type; } *pkt;
3428
3429 /* Start of the frame */
3430 pkt = data;
3431 type = pkt->type;
3432
3433 data++;
3434 count--;
3435 } else
3436 type = bt_cb(skb)->pkt_type;
3437
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003438 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003439 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303440 if (rem < 0)
3441 return rem;
3442
3443 data += (count - rem);
3444 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003445 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303446
3447 return rem;
3448}
3449EXPORT_SYMBOL(hci_recv_stream_fragment);
3450
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451/* ---- Interface to upper protocols ---- */
3452
/* Add @cb to the global HCI callback list under the writer lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3464
/* Remove @cb from the global HCI callback list under the writer lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3476
/* Hand an outgoing frame to the driver, after mirroring it to the
 * monitor channel and (in promiscuous mode) to the HCI sockets.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* ->send consumes the skb; only the failure is reported here */
	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3498
Johan Hedberg3119ae92013-03-05 20:37:44 +02003499void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3500{
3501 skb_queue_head_init(&req->cmd_q);
3502 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003503 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003504}
3505
/* Submit all commands queued on @req to the controller.  @complete is
 * attached to the last command and thus fires when the whole request
 * has finished.  Returns 0 on success or a negative error.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice the request atomically onto the device command queue */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3537
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003538static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003539 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540{
3541 int len = HCI_COMMAND_HDR_SIZE + plen;
3542 struct hci_command_hdr *hdr;
3543 struct sk_buff *skb;
3544
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003546 if (!skb)
3547 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548
3549 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003550 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 hdr->plen = plen;
3552
3553 if (plen)
3554 memcpy(skb_put(skb, plen), param, plen);
3555
3556 BT_DBG("skb len %d", skb->len);
3557
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003558 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003559
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003560 return skb;
3561}
3562
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	/* Queue for the command work item to transmit */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point
	 * in queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Remember the failure so hci_req_run() can abort */
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3618
/* Queue a command to an asynchronous HCI request with no special
 * completion event (event 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3624
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003626void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627{
3628 struct hci_command_hdr *hdr;
3629
3630 if (!hdev->sent_cmd)
3631 return NULL;
3632
3633 hdr = (void *) hdev->sent_cmd->data;
3634
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003635 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 return NULL;
3637
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003638 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639
3640 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3641}
3642
3643/* Send ACL data */
3644static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3645{
3646 struct hci_acl_hdr *hdr;
3647 int len = skb->len;
3648
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003649 skb_push(skb, HCI_ACL_HDR_SIZE);
3650 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003651 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003652 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3653 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654}
3655
/* Add ACL headers to an outgoing packet (and each fragment on its
 * frag_list, if any) and queue it on @queue.  AMP controllers address
 * packets by channel handle rather than connection handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * handled individually via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3713
/* Queue outgoing ACL data on @chan and kick the TX work item */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	/* Actual transmission happens from the TX work item */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724
3725/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003726void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727{
3728 struct hci_dev *hdev = conn->hdev;
3729 struct hci_sco_hdr hdr;
3730
3731 BT_DBG("%s len %d", hdev->name, skb->len);
3732
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003733 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734 hdr.dlen = skb->len;
3735
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003736 skb_push(skb, HCI_SCO_HDR_SIZE);
3737 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003738 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003740 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003741
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003743 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745
3746/* ---- HCI TX task (outgoing data) ---- */
3747
3748/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * outstanding packets, and compute its fair share (@quote) of the
 * controller's free buffer slots.  Returns NULL (and *quote = 0) when
 * nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Each link type draws from its own buffer pool; LE falls
		 * back to the ACL pool when no LE buffers are advertised.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3808
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003809static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810{
3811 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003812 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813
Ville Tervobae1f5d92011-02-10 22:38:53 -03003814 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003816 rcu_read_lock();
3817
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003819 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003820 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003821 BT_ERR("%s killing stalled connection %pMR",
3822 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003823 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 }
3825 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003826
3827 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828}
3829
/* Pick the best channel to service for link @type: among channels whose
 * head-of-queue skb has the highest priority, choose the one on the
 * connection with the fewest outstanding packets.  @quote receives that
 * connection's fair share of free controller buffers.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the search */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* At equal priority the least-sent connection wins */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Divide the link type's free buffer slots among the candidates */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3911
/* Priority aging: for every channel of @type that has pending data but
 * sent nothing (chan->sent == 0), promote its head skb towards
 * HCI_PRIO_MAX - 1 so low-priority traffic is not starved forever.
 * Channels that did send get their counter reset for the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced channels keep their priority */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3961
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003962static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3963{
3964 /* Calculate count of blocks used by this packet */
3965 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3966}
3967
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003968static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 if (!test_bit(HCI_RAW, &hdev->flags)) {
3971 /* ACL tx timeout must be longer than maximum
3972 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003973 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003974 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003975 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003977}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978
/* Packet-based ACL scheduler: drain channel queues while the
 * controller still has free ACL buffers (hdev->acl_cnt).
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* snapshot to detect progress */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Flag a stalled link if no buffers were freed for too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, promote frames of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4016
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004017static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004018{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004019 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004020 struct hci_chan *chan;
4021 struct sk_buff *skb;
4022 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004023 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004024
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004025 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004026
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004027 BT_DBG("%s", hdev->name);
4028
4029 if (hdev->dev_type == HCI_AMP)
4030 type = AMP_LINK;
4031 else
4032 type = ACL_LINK;
4033
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004034 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004035 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004036 u32 priority = (skb_peek(&chan->data_q))->priority;
4037 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4038 int blocks;
4039
4040 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004041 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004042
4043 /* Stop if priority has changed */
4044 if (skb->priority < priority)
4045 break;
4046
4047 skb = skb_dequeue(&chan->data_q);
4048
4049 blocks = __get_blocks(hdev, skb);
4050 if (blocks > hdev->block_cnt)
4051 return;
4052
4053 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004054 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004055
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004056 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004057 hdev->acl_last_tx = jiffies;
4058
4059 hdev->block_cnt -= blocks;
4060 quote -= blocks;
4061
4062 chan->sent += blocks;
4063 chan->conn->sent += blocks;
4064 }
4065 }
4066
4067 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004068 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004069}
4070
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004071static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004072{
4073 BT_DBG("%s", hdev->name);
4074
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004075 /* No ACL link over BR/EDR controller */
4076 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4077 return;
4078
4079 /* No AMP link over AMP controller */
4080 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004081 return;
4082
4083 switch (hdev->flow_ctl_mode) {
4084 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4085 hci_sched_acl_pkt(hdev);
4086 break;
4087
4088 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4089 hci_sched_acl_blk(hdev);
4090 break;
4091 }
4092}
4093
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004095static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096{
4097 struct hci_conn *conn;
4098 struct sk_buff *skb;
4099 int quote;
4100
4101 BT_DBG("%s", hdev->name);
4102
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004103 if (!hci_conn_num(hdev, SCO_LINK))
4104 return;
4105
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4107 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4108 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004109 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110
4111 conn->sent++;
4112 if (conn->sent == ~0)
4113 conn->sent = 0;
4114 }
4115 }
4116}
4117
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004118static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004119{
4120 struct hci_conn *conn;
4121 struct sk_buff *skb;
4122 int quote;
4123
4124 BT_DBG("%s", hdev->name);
4125
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004126 if (!hci_conn_num(hdev, ESCO_LINK))
4127 return;
4128
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004129 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4130 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004131 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4132 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004133 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004134
4135 conn->sent++;
4136 if (conn->sent == ~0)
4137 conn->sent = 0;
4138 }
4139 }
4140}
4141
/* LE scheduler: drain LE channel queues within the controller's
 * buffer budget.  Controllers without dedicated LE buffers
 * (le_pkts == 0) borrow from the ACL buffer pool instead.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the dedicated LE pool if present, else the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect whether anything was sent */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, promote frames of starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4192
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004193static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004195 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 struct sk_buff *skb;
4197
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004198 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004199 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
Marcel Holtmann52de5992013-09-03 18:08:38 -07004201 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4202 /* Schedule queues and send stuff to HCI driver */
4203 hci_sched_acl(hdev);
4204 hci_sched_sco(hdev);
4205 hci_sched_esco(hdev);
4206 hci_sched_le(hdev);
4207 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004208
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 /* Send next queued raw (unknown type) packet */
4210 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004211 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212}
4213
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004214/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215
4216/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004217static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218{
4219 struct hci_acl_hdr *hdr = (void *) skb->data;
4220 struct hci_conn *conn;
4221 __u16 handle, flags;
4222
4223 skb_pull(skb, HCI_ACL_HDR_SIZE);
4224
4225 handle = __le16_to_cpu(hdr->handle);
4226 flags = hci_flags(handle);
4227 handle = hci_handle(handle);
4228
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004229 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004230 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231
4232 hdev->stat.acl_rx++;
4233
4234 hci_dev_lock(hdev);
4235 conn = hci_conn_hash_lookup_handle(hdev, handle);
4236 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004237
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004239 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004240
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004242 l2cap_recv_acldata(conn, skb, flags);
4243 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004245 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004246 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 }
4248
4249 kfree_skb(skb);
4250}
4251
4252/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004253static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254{
4255 struct hci_sco_hdr *hdr = (void *) skb->data;
4256 struct hci_conn *conn;
4257 __u16 handle;
4258
4259 skb_pull(skb, HCI_SCO_HDR_SIZE);
4260
4261 handle = __le16_to_cpu(hdr->handle);
4262
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004263 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264
4265 hdev->stat.sco_rx++;
4266
4267 hci_dev_lock(hdev);
4268 conn = hci_conn_hash_lookup_handle(hdev, handle);
4269 hci_dev_unlock(hdev);
4270
4271 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004273 sco_recv_scodata(conn, skb);
4274 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004276 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004277 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278 }
4279
4280 kfree_skb(skb);
4281}
4282
Johan Hedberg9238f362013-03-05 20:37:48 +02004283static bool hci_req_is_complete(struct hci_dev *hdev)
4284{
4285 struct sk_buff *skb;
4286
4287 skb = skb_peek(&hdev->cmd_q);
4288 if (!skb)
4289 return true;
4290
4291 return bt_cb(skb)->req.start;
4292}
4293
Johan Hedberg42c6b122013-03-05 20:37:49 +02004294static void hci_resend_last(struct hci_dev *hdev)
4295{
4296 struct hci_command_hdr *sent;
4297 struct sk_buff *skb;
4298 u16 opcode;
4299
4300 if (!hdev->sent_cmd)
4301 return;
4302
4303 sent = (void *) hdev->sent_cmd->data;
4304 opcode = __le16_to_cpu(sent->opcode);
4305 if (opcode == HCI_OP_RESET)
4306 return;
4307
4308 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4309 if (!skb)
4310 return;
4311
4312 skb_queue_head(&hdev->cmd_q, skb);
4313 queue_work(hdev->workqueue, &hdev->cmd_work);
4314}
4315
/* Match a command complete/status event against the currently running
 * HCI request.  When the request has finished (its last command
 * succeeded, or any command failed), find its completion callback —
 * either on hdev->sent_cmd or on a queued command — flush the
 * remaining queued commands of the request, and invoke the callback.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4381
/* RX work: process every frame queued on hdev->rx_q, mirroring each
 * to the monitor (and promiscuous sockets), then dispatching by
 * packet type to the event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw-only or user-channel devices bypass the stack */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4437
/* Command work: send the next queued HCI command once the controller
 * has credit (cmd_cnt > 0), keeping a clone in hdev->sent_cmd for
 * completion matching and arming the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd copy, if any */
		kfree_skb(hdev->sent_cmd);

		/* Keep a copy so the matching complete event can find
		 * the request data.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the response may legitimately be
			 * delayed; don't run the command timeout then.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}