/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
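
/* Example usage from userspace (illustrative, assuming debugfs is mounted
 * at the conventional /sys/kernel/debug and the controller is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing toggles Device Under Test mode via the synchronous HCI commands
 * issued in dut_mode_write() above; reading reports the current flag.
 */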

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored with their bytes
		 * reversed relative to the big-endian textual form, so
		 * restore big-endian order for the %pUb modifier.
		 */
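		/* Illustrative example (not from the original source):
		 * the Serial Port service UUID
		 * 00001101-0000-1000-8000-00805f9b34fb would be kept in
		 * uuid->uuid as the byte stream
		 *   fb 34 9b 5f 80 00 00 80 00 10 00 00 01 11 00 00
		 * and the loop below flips it back for printing.
		 */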
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
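
/* Note added for clarity (based on the Bluetooth Core Specification's
 * HCI Sniff Mode command; an assumption, not spelled out in this file):
 * the sniff interval values are expressed in baseband slots of 0.625 ms
 * and must be even, which is why the setters above reject zero and odd
 * values.
 */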

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
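
/* Note added for clarity (per the Bluetooth Core Specification's LE
 * connection parameters): the interval values above are in units of
 * 1.25 ms, so the accepted range 0x0006-0x0c80 corresponds to 7.5 ms
 * through 4 s.
 */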

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
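
/* Usage sketch (illustrative; it mirrors the dut_mode_write() caller
 * above): a synchronous command is issued under the request lock and the
 * first byte of the returned event payload carries the command status.
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */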
867
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200869static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200870 void (*func)(struct hci_request *req,
871 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200872 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200874 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 DECLARE_WAITQUEUE(wait, current);
876 int err = 0;
877
878 BT_DBG("%s start", hdev->name);
879
Johan Hedberg42c6b122013-03-05 20:37:49 +0200880 hci_req_init(&req, hdev);
881
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882 hdev->req_status = HCI_REQ_PEND;
883
Johan Hedberg42c6b122013-03-05 20:37:49 +0200884 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200885
Johan Hedberg42c6b122013-03-05 20:37:49 +0200886 err = hci_req_run(&req, hci_req_sync_complete);
887 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200888 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300889
890 /* ENODATA means the HCI request command queue is empty.
891 * This can happen when a request with conditionals doesn't
892 * trigger any commands to be sent. This is normal behavior
893 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200894 */
Andre Guedes920c8302013-03-08 11:20:15 -0300895 if (err == -ENODATA)
896 return 0;
897
898 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200899 }
900
Andre Guedesbc4445c2013-03-08 11:20:13 -0300901 add_wait_queue(&hdev->req_wait_q, &wait);
902 set_current_state(TASK_INTERRUPTIBLE);
903
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 schedule_timeout(timeout);
905
906 remove_wait_queue(&hdev->req_wait_q, &wait);
907
908 if (signal_pending(current))
909 return -EINTR;
910
911 switch (hdev->req_status) {
912 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700913 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 break;
915
916 case HCI_REQ_CANCELED:
917 err = -hdev->req_result;
918 break;
919
920 default:
921 err = -ETIMEDOUT;
922 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700923 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924
Johan Hedberga5040ef2011-01-10 13:28:59 +0200925 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926
927 BT_DBG("%s end: err %d", hdev->name, err);
928
929 return err;
930}
931
Johan Hedberg01178cd2013-03-05 20:37:41 +0200932static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200933 void (*req)(struct hci_request *req,
934 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200935 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936{
937 int ret;
938
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200939 if (!test_bit(HCI_UP, &hdev->flags))
940 return -ENETDOWN;
941
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942 /* Serialize all requests */
943 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200944 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 hci_req_unlock(hdev);
946
947 return ret;
948}
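
/* Usage sketch (illustrative): callers hand in one of the request
 * builders defined below, e.g. to enable page and inquiry scan:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * The builder queues the HCI commands for the given opt value and the
 * call returns once the request completes, is canceled, or times out.
 */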

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
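
/* Note added for clarity (per the HCI Write Inquiry Mode command): the
 * returned value selects standard inquiry results (0x00), inquiry results
 * with RSSI (0x01), or inquiry results with RSSI or extended inquiry
 * results (0x02).
 */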

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list max_page
		 * as 0 as long as SSP has not been enabled. To achieve
		 * proper debugging output, force the minimum max_page to 1
		 * at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom-based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it is
	 * marked as supported. If it is not supported, assume that the
	 * controller does not have actual support for stored link keys,
	 * which makes this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001362 if (hdev->commands[6] & 0x80 &&
1363 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001364 struct hci_cp_delete_stored_link_key cp;
1365
1366 bacpy(&cp.bdaddr, BDADDR_ANY);
1367 cp.delete_all = 0x01;
1368 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1369 sizeof(cp), &cp);
1370 }
1371
Johan Hedberg2177bab2013-03-05 20:37:43 +02001372 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001373 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001374
Marcel Holtmann79830f62013-10-18 16:38:09 -07001375 if (lmp_le_capable(hdev)) {
Marcel Holtmannbef34c02013-10-29 12:26:51 -07001376 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1377 /* If the controller has a public BD_ADDR, then
1378 * by default use that one. If this is a LE only
1379 * controller without a public address, default
1380 * to the random address.
1381 */
1382 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1383 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1384 else
1385 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1386 }
Marcel Holtmann79830f62013-10-18 16:38:09 -07001387
Johan Hedberg42c6b122013-03-05 20:37:49 +02001388 hci_set_le_support(req);
Marcel Holtmann79830f62013-10-18 16:38:09 -07001389 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001390
1391 /* Read features beyond page 1 if available */
1392 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1393 struct hci_cp_read_local_ext_features cp;
1394
1395 cp.page = p;
1396 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1397 sizeof(cp), &cp);
1398 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001399}
1400
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001401static void hci_init4_req(struct hci_request *req, unsigned long opt)
1402{
1403 struct hci_dev *hdev = req->hdev;
1404
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001405 /* Set event mask page 2 if the HCI command for it is supported */
1406 if (hdev->commands[22] & 0x04)
1407 hci_set_event_mask_page_2(req);
1408
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001409 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001410 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001411 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001412
1413 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001414 if ((lmp_sc_capable(hdev) ||
1415 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001416 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1417 u8 support = 0x01;
1418 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1419 sizeof(support), &support);
1420 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001421}
1422
Johan Hedberg2177bab2013-03-05 20:37:43 +02001423static int __hci_init(struct hci_dev *hdev)
1424{
1425 int err;
1426
1427 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1428 if (err < 0)
1429 return err;
1430
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001431 /* The Device Under Test (DUT) mode is special and available for
1432 * all controller types. So just create it early on.
1433 */
1434 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1435 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1436 &dut_mode_fops);
1437 }
1438
Johan Hedberg2177bab2013-03-05 20:37:43 +02001439 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1440 * BR/EDR/LE type controllers. AMP controllers only need the
1441 * first stage init.
1442 */
1443 if (hdev->dev_type != HCI_BREDR)
1444 return 0;
1445
1446 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1447 if (err < 0)
1448 return err;
1449
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001450 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1451 if (err < 0)
1452 return err;
1453
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001454 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1455 if (err < 0)
1456 return err;
1457
1458 /* Only create debugfs entries during the initial setup
1459 * phase and not every time the controller gets powered on.
1460 */
1461 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1462 return 0;
1463
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001464 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1465 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001466 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1467 &hdev->manufacturer);
1468 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1469 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001470 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1471 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001472 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1473
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001474 if (lmp_bredr_capable(hdev)) {
1475 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1476 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001477 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1478 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001479 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1480 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001481 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1482 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001483 }
1484
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001485 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001486 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1487 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001488 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1489 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001490 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1491 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001492 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1493 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001494 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001495
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001496 if (lmp_sniff_capable(hdev)) {
1497 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1498 hdev, &idle_timeout_fops);
1499 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1500 hdev, &sniff_min_interval_fops);
1501 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1502 hdev, &sniff_max_interval_fops);
1503 }
1504
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001505 if (lmp_le_capable(hdev)) {
1506 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1507 &hdev->le_white_list_size);
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001508 debugfs_create_file("static_address", 0444, hdev->debugfs,
1509 hdev, &static_address_fops);
Marcel Holtmann92202182013-10-18 16:38:10 -07001510 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1511 hdev, &own_address_type_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001512 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1513 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001514 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1515 hdev, &conn_min_interval_fops);
1516 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1517 hdev, &conn_max_interval_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001518 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1519 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001520 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001521
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001522 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523}
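/* Illustrative sketch (not part of the original file): once registered,
 * the entries created above are readable through the debugfs mount. The
 * path below assumes debugfs is mounted at /sys/kernel/debug and that
 * the adapter came up as hci0; adjust both as needed.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/sys/kernel/debug/bluetooth/hci0/features",
 *				"r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// one line per feature page
 *		fclose(f);
 *		return 0;
 *	}
 */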
1524
Johan Hedberg42c6b122013-03-05 20:37:49 +02001525static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526{
1527 __u8 scan = opt;
1528
Johan Hedberg42c6b122013-03-05 20:37:49 +02001529 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
1531 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533}
1534
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536{
1537 __u8 auth = opt;
1538
Johan Hedberg42c6b122013-03-05 20:37:49 +02001539 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543}
1544
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546{
1547 __u8 encrypt = opt;
1548
Johan Hedberg42c6b122013-03-05 20:37:49 +02001549 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001551 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553}
1554
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001556{
1557 __le16 policy = cpu_to_le16(opt);
1558
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001560
1561 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001563}
1564
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001565/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 * Device is held on return. */
1567struct hci_dev *hci_dev_get(int index)
1568{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001569 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
1571 BT_DBG("%d", index);
1572
1573 if (index < 0)
1574 return NULL;
1575
1576 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001577 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 if (d->id == index) {
1579 hdev = hci_dev_hold(d);
1580 break;
1581 }
1582 }
1583 read_unlock(&hci_dev_list_lock);
1584 return hdev;
1585}
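/* Illustrative sketch (not part of the original file): hci_dev_get()
 * returns the device with its reference count raised, so every
 * successful lookup must be balanced with hci_dev_put() once the
 * caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);	// drop the hci_dev_get() reference
 *	}
 */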
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
1587/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001588
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001589bool hci_discovery_active(struct hci_dev *hdev)
1590{
1591 struct discovery_state *discov = &hdev->discovery;
1592
Andre Guedes6fbe1952012-02-03 17:47:58 -03001593 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001594 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001595 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001596 return true;
1597
Andre Guedes6fbe1952012-02-03 17:47:58 -03001598 default:
1599 return false;
1600 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001601}
1602
Johan Hedbergff9ef572012-01-04 14:23:45 +02001603void hci_discovery_set_state(struct hci_dev *hdev, int state)
1604{
1605 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1606
1607 if (hdev->discovery.state == state)
1608 return;
1609
1610 switch (state) {
1611 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001612 if (hdev->discovery.state != DISCOVERY_STARTING)
1613 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001614 break;
1615 case DISCOVERY_STARTING:
1616 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001617 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001618 mgmt_discovering(hdev, 1);
1619 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001620 case DISCOVERY_RESOLVING:
1621 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001622 case DISCOVERY_STOPPING:
1623 break;
1624 }
1625
1626 hdev->discovery.state = state;
1627}
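/* Illustrative sketch (not part of the original file): a typical
 * discovery round walks the states in this order, and mgmt_discovering()
 * is emitted only on the edge into DISCOVERY_FINDING and on the edge
 * back into DISCOVERY_STOPPED (unless stopped straight from STARTING):
 *
 *	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
 *	hci_discovery_set_state(hdev, DISCOVERY_FINDING);	// mgmt event
 *	hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);	// name lookups
 *	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);	// mgmt event
 */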
1628
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001629void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630{
Johan Hedberg30883512012-01-04 14:16:21 +02001631 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001632 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Johan Hedberg561aafb2012-01-04 13:31:59 +02001634 list_for_each_entry_safe(p, n, &cache->all, all) {
1635 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001636 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001638
1639 INIT_LIST_HEAD(&cache->unknown);
1640 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641}
1642
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001643struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1644 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645{
Johan Hedberg30883512012-01-04 14:16:21 +02001646 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 struct inquiry_entry *e;
1648
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001649 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
Johan Hedberg561aafb2012-01-04 13:31:59 +02001651 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001653 return e;
1654 }
1655
1656 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657}
1658
Johan Hedberg561aafb2012-01-04 13:31:59 +02001659struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001660 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001661{
Johan Hedberg30883512012-01-04 14:16:21 +02001662 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001663 struct inquiry_entry *e;
1664
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001665 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001666
1667 list_for_each_entry(e, &cache->unknown, list) {
1668 if (!bacmp(&e->data.bdaddr, bdaddr))
1669 return e;
1670 }
1671
1672 return NULL;
1673}
1674
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001675struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001676 bdaddr_t *bdaddr,
1677 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001678{
1679 struct discovery_state *cache = &hdev->discovery;
1680 struct inquiry_entry *e;
1681
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001682 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001683
1684 list_for_each_entry(e, &cache->resolve, list) {
1685 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1686 return e;
1687 if (!bacmp(&e->data.bdaddr, bdaddr))
1688 return e;
1689 }
1690
1691 return NULL;
1692}
1693
Johan Hedberga3d4e202012-01-09 00:53:02 +02001694void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001695 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001696{
1697 struct discovery_state *cache = &hdev->discovery;
1698 struct list_head *pos = &cache->resolve;
1699 struct inquiry_entry *p;
1700
1701 list_del(&ie->list);
1702
1703 list_for_each_entry(p, &cache->resolve, list) {
1704 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001705 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001706 break;
1707 pos = &p->list;
1708 }
1709
1710 list_add(&ie->list, pos);
1711}
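/* Worked example (not part of the original file): the resolve list is
 * kept ordered by ascending abs(rssi), i.e. strongest signal first.
 * With pending entries at RSSI -40 and -70, re-inserting an entry whose
 * RSSI changed to -55 places it between the two, so name resolution is
 * attempted for the strongest responders before the weaker ones.
 */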
1712
Johan Hedberg31754052012-01-04 13:39:52 +02001713bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001714 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715{
Johan Hedberg30883512012-01-04 14:16:21 +02001716 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001717 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001719 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
Szymon Janc2b2fec42012-11-20 11:38:54 +01001721 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1722
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001723 if (ssp)
1724 *ssp = data->ssp_mode;
1725
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001726 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001727 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001728 if (ie->data.ssp_mode && ssp)
1729 *ssp = true;
1730
Johan Hedberga3d4e202012-01-09 00:53:02 +02001731 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001732 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001733 ie->data.rssi = data->rssi;
1734 hci_inquiry_cache_update_resolve(hdev, ie);
1735 }
1736
Johan Hedberg561aafb2012-01-04 13:31:59 +02001737 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001738 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001739
Johan Hedberg561aafb2012-01-04 13:31:59 +02001740 /* Entry not in the cache. Add new one. */
1741 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1742 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001743 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001744
1745 list_add(&ie->all, &cache->all);
1746
1747 if (name_known) {
1748 ie->name_state = NAME_KNOWN;
1749 } else {
1750 ie->name_state = NAME_NOT_KNOWN;
1751 list_add(&ie->list, &cache->unknown);
1752 }
1753
1754update:
1755 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001756 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001757 ie->name_state = NAME_KNOWN;
1758 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 }
1760
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001761 memcpy(&ie->data, data, sizeof(*data));
1762 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001764
1765 if (ie->name_state == NAME_NOT_KNOWN)
1766 return false;
1767
1768 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769}
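/* Illustrative sketch (not part of the original file): inquiry result
 * handlers in hci_event.c feed this function one entry per response.
 * The return value tells the caller whether the remote name is already
 * known, which decides if a remote name request still has to be issued:
 *
 *	bool name_known, ssp;
 *
 *	name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
 *	if (!name_known)
 *		;	// name unresolved, schedule a name request later
 */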
1770
1771static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1772{
Johan Hedberg30883512012-01-04 14:16:21 +02001773 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 struct inquiry_info *info = (struct inquiry_info *) buf;
1775 struct inquiry_entry *e;
1776 int copied = 0;
1777
Johan Hedberg561aafb2012-01-04 13:31:59 +02001778 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001780
1781 if (copied >= num)
1782 break;
1783
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 bacpy(&info->bdaddr, &data->bdaddr);
1785 info->pscan_rep_mode = data->pscan_rep_mode;
1786 info->pscan_period_mode = data->pscan_period_mode;
1787 info->pscan_mode = data->pscan_mode;
1788 memcpy(info->dev_class, data->dev_class, 3);
1789 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001790
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001792 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 }
1794
1795 BT_DBG("cache %p, copied %d", cache, copied);
1796 return copied;
1797}
1798
Johan Hedberg42c6b122013-03-05 20:37:49 +02001799static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800{
1801 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001802 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 struct hci_cp_inquiry cp;
1804
1805 BT_DBG("%s", hdev->name);
1806
1807 if (test_bit(HCI_INQUIRY, &hdev->flags))
1808 return;
1809
1810 /* Start Inquiry */
1811 memcpy(&cp.lap, &ir->lap, 3);
1812 cp.length = ir->length;
1813 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001814 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815}
1816
Andre Guedes3e13fa12013-03-27 20:04:56 -03001817static int wait_inquiry(void *word)
1818{
1819 schedule();
1820 return signal_pending(current);
1821}
1822
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823int hci_inquiry(void __user *arg)
1824{
1825 __u8 __user *ptr = arg;
1826 struct hci_inquiry_req ir;
1827 struct hci_dev *hdev;
1828 int err = 0, do_inquiry = 0, max_rsp;
1829 long timeo;
1830 __u8 *buf;
1831
1832 if (copy_from_user(&ir, ptr, sizeof(ir)))
1833 return -EFAULT;
1834
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001835 hdev = hci_dev_get(ir.dev_id);
1836 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 return -ENODEV;
1838
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001839 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1840 err = -EBUSY;
1841 goto done;
1842 }
1843
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001844 if (hdev->dev_type != HCI_BREDR) {
1845 err = -EOPNOTSUPP;
1846 goto done;
1847 }
1848
Johan Hedberg56f87902013-10-02 13:43:13 +03001849 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1850 err = -EOPNOTSUPP;
1851 goto done;
1852 }
1853
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001854 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001855 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001856 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001857 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 do_inquiry = 1;
1859 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001860 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
Marcel Holtmann04837f62006-07-03 10:02:33 +02001862 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001863
1864 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001865 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1866 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001867 if (err < 0)
1868 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001869
1870 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1871 * cleared). If it is interrupted by a signal, return -EINTR.
1872 */
1873 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1874 TASK_INTERRUPTIBLE))
1875 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001876 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001878	/* For an unlimited number of responses we will use a buffer with
1879	 * 255 entries.
1880	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1882
1883	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1884	 * and then copy it to user space.
1885	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001886 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001887 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 err = -ENOMEM;
1889 goto done;
1890 }
1891
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001892 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001894 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896 BT_DBG("num_rsp %d", ir.num_rsp);
1897
1898 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1899 ptr += sizeof(ir);
1900 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001901 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001903 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 err = -EFAULT;
1905
1906 kfree(buf);
1907
1908done:
1909 hci_dev_put(hdev);
1910 return err;
1911}
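/* Illustrative sketch (not part of the original file): userspace reaches
 * this handler through the HCIINQUIRY ioctl. The BlueZ libbluetooth
 * wrapper hci_inquiry() packages the request shown above; a minimal
 * scan, assuming libbluetooth is installed, looks roughly like this
 * (link with -lbluetooth):
 *
 *	#include <stdlib.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *	#include <bluetooth/hci_lib.h>
 *
 *	int main(void)
 *	{
 *		int dev_id = hci_get_route(NULL);
 *		inquiry_info *ii = malloc(255 * sizeof(*ii));
 *		// 8 * 1.28s inquiry window, at most 255 responses
 *		int n = hci_inquiry(dev_id, 8, 255, NULL, &ii,
 *				    IREQ_CACHE_FLUSH);
 *
 *		// on success, n is the number of entries filled into ii
 *		free(ii);
 *		return n < 0;
 *	}
 */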
1912
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001913static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 int ret = 0;
1916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 BT_DBG("%s %p", hdev->name, hdev);
1918
1919 hci_req_lock(hdev);
1920
Johan Hovold94324962012-03-15 14:48:41 +01001921 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1922 ret = -ENODEV;
1923 goto done;
1924 }
1925
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001926 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1927 /* Check for rfkill but allow the HCI setup stage to
1928 * proceed (which in itself doesn't cause any RF activity).
1929 */
1930 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1931 ret = -ERFKILL;
1932 goto done;
1933 }
1934
1935	/* Check for a valid public address or a configured static
1936	 * random address, but let the HCI setup proceed to
1937	 * be able to determine if there is a public address
1938	 * or not.
1939 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001940 * In case of user channel usage, it is not important
1941 * if a public address or static random address is
1942 * available.
1943 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001944 * This check is only valid for BR/EDR controllers
1945 * since AMP controllers do not have an address.
1946 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001947 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1948 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001949 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1950 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1951 ret = -EADDRNOTAVAIL;
1952 goto done;
1953 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001954 }
1955
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 if (test_bit(HCI_UP, &hdev->flags)) {
1957 ret = -EALREADY;
1958 goto done;
1959 }
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 if (hdev->open(hdev)) {
1962 ret = -EIO;
1963 goto done;
1964 }
1965
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001966 atomic_set(&hdev->cmd_cnt, 1);
1967 set_bit(HCI_INIT, &hdev->flags);
1968
1969 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1970 ret = hdev->setup(hdev);
1971
1972 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001973 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1974 set_bit(HCI_RAW, &hdev->flags);
1975
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001976 if (!test_bit(HCI_RAW, &hdev->flags) &&
1977 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001978 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 }
1980
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001981 clear_bit(HCI_INIT, &hdev->flags);
1982
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 if (!ret) {
1984 hci_dev_hold(hdev);
1985 set_bit(HCI_UP, &hdev->flags);
1986 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001987 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001988 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001989 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001990 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001991 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001992 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001993 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001994 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001996 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001997 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001998 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000 skb_queue_purge(&hdev->cmd_q);
2001 skb_queue_purge(&hdev->rx_q);
2002
2003 if (hdev->flush)
2004 hdev->flush(hdev);
2005
2006 if (hdev->sent_cmd) {
2007 kfree_skb(hdev->sent_cmd);
2008 hdev->sent_cmd = NULL;
2009 }
2010
2011 hdev->close(hdev);
2012 hdev->flags = 0;
2013 }
2014
2015done:
2016 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 return ret;
2018}
2019
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002020/* ---- HCI ioctl helpers ---- */
2021
2022int hci_dev_open(__u16 dev)
2023{
2024 struct hci_dev *hdev;
2025 int err;
2026
2027 hdev = hci_dev_get(dev);
2028 if (!hdev)
2029 return -ENODEV;
2030
Johan Hedberge1d08f42013-10-01 22:44:50 +03002031 /* We need to ensure that no other power on/off work is pending
2032 * before proceeding to call hci_dev_do_open. This is
2033 * particularly important if the setup procedure has not yet
2034 * completed.
2035 */
2036 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2037 cancel_delayed_work(&hdev->power_off);
2038
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002039 /* After this call it is guaranteed that the setup procedure
2040 * has finished. This means that error conditions like RFKILL
2041 * or no valid public or static random address apply.
2042 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002043 flush_workqueue(hdev->req_workqueue);
2044
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002045 err = hci_dev_do_open(hdev);
2046
2047 hci_dev_put(hdev);
2048
2049 return err;
2050}
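/* Illustrative sketch (not part of the original file): hci_dev_open()
 * backs the HCIDEVUP ioctl, which is what hciconfig uses to bring an
 * adapter up. A rough userspace equivalent of "hciconfig hci0 up":
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int main(void)
 *	{
 *		int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		int err = ioctl(ctl, HCIDEVUP, 0);	// 0 == hci0
 *
 *		close(ctl);
 *		return err < 0;
 *	}
 */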
2051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052static int hci_dev_do_close(struct hci_dev *hdev)
2053{
2054 BT_DBG("%s %p", hdev->name, hdev);
2055
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002056 cancel_delayed_work(&hdev->power_off);
2057
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 hci_req_cancel(hdev, ENODEV);
2059 hci_req_lock(hdev);
2060
2061 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002062 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 hci_req_unlock(hdev);
2064 return 0;
2065 }
2066
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002067 /* Flush RX and TX works */
2068 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002069 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002071 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002072 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002073 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002074 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002075 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002076 }
2077
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002078 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002079 cancel_delayed_work(&hdev->service_cache);
2080
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002081 cancel_delayed_work_sync(&hdev->le_scan_disable);
2082
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002083 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002084 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002086 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
2088 hci_notify(hdev, HCI_DEV_DOWN);
2089
2090 if (hdev->flush)
2091 hdev->flush(hdev);
2092
2093 /* Reset device */
2094 skb_queue_purge(&hdev->cmd_q);
2095 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002096 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002097 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002098 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002100 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 clear_bit(HCI_INIT, &hdev->flags);
2102 }
2103
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002104 /* flush cmd work */
2105 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
2107 /* Drop queues */
2108 skb_queue_purge(&hdev->rx_q);
2109 skb_queue_purge(&hdev->cmd_q);
2110 skb_queue_purge(&hdev->raw_q);
2111
2112 /* Drop last sent command */
2113 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002114 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 kfree_skb(hdev->sent_cmd);
2116 hdev->sent_cmd = NULL;
2117 }
2118
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002119 kfree_skb(hdev->recv_evt);
2120 hdev->recv_evt = NULL;
2121
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 /* After this point our queues are empty
2123 * and no tasks are scheduled. */
2124 hdev->close(hdev);
2125
Johan Hedberg35b973c2013-03-15 17:06:59 -05002126 /* Clear flags */
2127 hdev->flags = 0;
2128 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2129
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002130 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2131 if (hdev->dev_type == HCI_BREDR) {
2132 hci_dev_lock(hdev);
2133 mgmt_powered(hdev, 0);
2134 hci_dev_unlock(hdev);
2135 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002136 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002137
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002138 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002139 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002140
Johan Hedberge59fda82012-02-22 18:11:53 +02002141 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002142 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002143
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 hci_req_unlock(hdev);
2145
2146 hci_dev_put(hdev);
2147 return 0;
2148}
2149
2150int hci_dev_close(__u16 dev)
2151{
2152 struct hci_dev *hdev;
2153 int err;
2154
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002155 hdev = hci_dev_get(dev);
2156 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002158
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002159 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2160 err = -EBUSY;
2161 goto done;
2162 }
2163
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002164 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2165 cancel_delayed_work(&hdev->power_off);
2166
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002168
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002169done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 hci_dev_put(hdev);
2171 return err;
2172}
2173
2174int hci_dev_reset(__u16 dev)
2175{
2176 struct hci_dev *hdev;
2177 int ret = 0;
2178
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002179 hdev = hci_dev_get(dev);
2180 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 return -ENODEV;
2182
2183 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
Marcel Holtmann808a0492013-08-26 20:57:58 -07002185 if (!test_bit(HCI_UP, &hdev->flags)) {
2186 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002190 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2191 ret = -EBUSY;
2192 goto done;
2193 }
2194
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 /* Drop queues */
2196 skb_queue_purge(&hdev->rx_q);
2197 skb_queue_purge(&hdev->cmd_q);
2198
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002199 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002200 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002202 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
2204 if (hdev->flush)
2205 hdev->flush(hdev);
2206
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002207 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002208	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
2210 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002211 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 hci_req_unlock(hdev);
2215 hci_dev_put(hdev);
2216 return ret;
2217}
2218
2219int hci_dev_reset_stat(__u16 dev)
2220{
2221 struct hci_dev *hdev;
2222 int ret = 0;
2223
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002224 hdev = hci_dev_get(dev);
2225 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 return -ENODEV;
2227
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002228 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2229 ret = -EBUSY;
2230 goto done;
2231 }
2232
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2234
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002235done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 return ret;
2238}
2239
2240int hci_dev_cmd(unsigned int cmd, void __user *arg)
2241{
2242 struct hci_dev *hdev;
2243 struct hci_dev_req dr;
2244 int err = 0;
2245
2246 if (copy_from_user(&dr, arg, sizeof(dr)))
2247 return -EFAULT;
2248
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002249 hdev = hci_dev_get(dr.dev_id);
2250 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 return -ENODEV;
2252
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002253 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2254 err = -EBUSY;
2255 goto done;
2256 }
2257
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002258 if (hdev->dev_type != HCI_BREDR) {
2259 err = -EOPNOTSUPP;
2260 goto done;
2261 }
2262
Johan Hedberg56f87902013-10-02 13:43:13 +03002263 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2264 err = -EOPNOTSUPP;
2265 goto done;
2266 }
2267
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 switch (cmd) {
2269 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002270 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2271 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 break;
2273
2274 case HCISETENCRYPT:
2275 if (!lmp_encrypt_capable(hdev)) {
2276 err = -EOPNOTSUPP;
2277 break;
2278 }
2279
2280 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2281 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002282 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2283 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 if (err)
2285 break;
2286 }
2287
Johan Hedberg01178cd2013-03-05 20:37:41 +02002288 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2289 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 break;
2291
2292 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002293 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2294 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 break;
2296
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002297 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002298 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2299 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002300 break;
2301
2302 case HCISETLINKMODE:
2303 hdev->link_mode = ((__u16) dr.dev_opt) &
2304 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2305 break;
2306
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 case HCISETPTYPE:
2308 hdev->pkt_type = (__u16) dr.dev_opt;
2309 break;
2310
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002312 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2313 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 break;
2315
2316 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002317 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2318 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 break;
2320
2321 default:
2322 err = -EINVAL;
2323 break;
2324 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002325
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002326done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 hci_dev_put(hdev);
2328 return err;
2329}
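/* Illustrative sketch (not part of the original file): each HCISET*
 * case above is driven by a struct hci_dev_req copied from userspace.
 * Enabling page and inquiry scan ("hciconfig hci0 piscan") on an
 * already-open raw HCI control socket ctl looks roughly like this:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,				// hci0
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
 */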
2330
2331int hci_get_dev_list(void __user *arg)
2332{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002333 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 struct hci_dev_list_req *dl;
2335 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 int n = 0, size, err;
2337 __u16 dev_num;
2338
2339 if (get_user(dev_num, (__u16 __user *) arg))
2340 return -EFAULT;
2341
2342 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2343 return -EINVAL;
2344
2345 size = sizeof(*dl) + dev_num * sizeof(*dr);
2346
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002347 dl = kzalloc(size, GFP_KERNEL);
2348 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 return -ENOMEM;
2350
2351 dr = dl->dev_req;
2352
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002353 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002354 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002355 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002356 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002357
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002358 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2359 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002360
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 (dr + n)->dev_id = hdev->id;
2362 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 if (++n >= dev_num)
2365 break;
2366 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002367 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
2369 dl->dev_num = n;
2370 size = sizeof(*dl) + n * sizeof(*dr);
2371
2372 err = copy_to_user(arg, dl, size);
2373 kfree(dl);
2374
2375 return err ? -EFAULT : 0;
2376}
2377
2378int hci_get_dev_info(void __user *arg)
2379{
2380 struct hci_dev *hdev;
2381 struct hci_dev_info di;
2382 int err = 0;
2383
2384 if (copy_from_user(&di, arg, sizeof(di)))
2385 return -EFAULT;
2386
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002387 hdev = hci_dev_get(di.dev_id);
2388 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 return -ENODEV;
2390
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002391 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002392 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002393
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002394 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2395 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002396
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 strcpy(di.name, hdev->name);
2398 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002399 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 di.flags = hdev->flags;
2401 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002402 if (lmp_bredr_capable(hdev)) {
2403 di.acl_mtu = hdev->acl_mtu;
2404 di.acl_pkts = hdev->acl_pkts;
2405 di.sco_mtu = hdev->sco_mtu;
2406 di.sco_pkts = hdev->sco_pkts;
2407 } else {
2408 di.acl_mtu = hdev->le_mtu;
2409 di.acl_pkts = hdev->le_pkts;
2410 di.sco_mtu = 0;
2411 di.sco_pkts = 0;
2412 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 di.link_policy = hdev->link_policy;
2414 di.link_mode = hdev->link_mode;
2415
2416 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2417 memcpy(&di.features, &hdev->features, sizeof(di.features));
2418
2419 if (copy_to_user(arg, &di, sizeof(di)))
2420 err = -EFAULT;
2421
2422 hci_dev_put(hdev);
2423
2424 return err;
2425}
2426
2427/* ---- Interface to HCI drivers ---- */
2428
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002429static int hci_rfkill_set_block(void *data, bool blocked)
2430{
2431 struct hci_dev *hdev = data;
2432
2433 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2434
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002435 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2436 return -EBUSY;
2437
Johan Hedberg5e130362013-09-13 08:58:17 +03002438 if (blocked) {
2439 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002440 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2441 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002442 } else {
2443 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002444 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002445
2446 return 0;
2447}
2448
2449static const struct rfkill_ops hci_rfkill_ops = {
2450 .set_block = hci_rfkill_set_block,
2451};
2452
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002453static void hci_power_on(struct work_struct *work)
2454{
2455 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002456 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002457
2458 BT_DBG("%s", hdev->name);
2459
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002460 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002461 if (err < 0) {
2462 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002463 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002464 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002465
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002466 /* During the HCI setup phase, a few error conditions are
2467 * ignored and they need to be checked now. If they are still
2468 * valid, it is important to turn the device back off.
2469 */
2470 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2471 (hdev->dev_type == HCI_BREDR &&
2472 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2473 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002474 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2475 hci_dev_do_close(hdev);
2476 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002477 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2478 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002479 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002480
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002481 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002482 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002483}
2484
2485static void hci_power_off(struct work_struct *work)
2486{
Johan Hedberg32435532011-11-07 22:16:04 +02002487 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002488 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002489
2490 BT_DBG("%s", hdev->name);
2491
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002492 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002493}
2494
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002495static void hci_discov_off(struct work_struct *work)
2496{
2497 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002498
2499 hdev = container_of(work, struct hci_dev, discov_off.work);
2500
2501 BT_DBG("%s", hdev->name);
2502
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002503 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002504}
2505
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002506int hci_uuids_clear(struct hci_dev *hdev)
2507{
Johan Hedberg48210022013-01-27 00:31:28 +02002508 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002509
Johan Hedberg48210022013-01-27 00:31:28 +02002510 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2511 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002512 kfree(uuid);
2513 }
2514
2515 return 0;
2516}
2517
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002518int hci_link_keys_clear(struct hci_dev *hdev)
2519{
2520 struct list_head *p, *n;
2521
2522 list_for_each_safe(p, n, &hdev->link_keys) {
2523 struct link_key *key;
2524
2525 key = list_entry(p, struct link_key, list);
2526
2527 list_del(p);
2528 kfree(key);
2529 }
2530
2531 return 0;
2532}
2533
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002534int hci_smp_ltks_clear(struct hci_dev *hdev)
2535{
2536 struct smp_ltk *k, *tmp;
2537
2538 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2539 list_del(&k->list);
2540 kfree(k);
2541 }
2542
2543 return 0;
2544}
2545
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002546struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2547{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002548 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002549
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002550 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002551 if (bacmp(bdaddr, &k->bdaddr) == 0)
2552 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002553
2554 return NULL;
2555}
2556
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302557static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002558 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002559{
2560 /* Legacy key */
2561 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302562 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002563
2564 /* Debug keys are insecure so don't store them persistently */
2565 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302566 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002567
2568 /* Changed combination key and there's no previous one */
2569 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302570 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002571
2572 /* Security mode 3 case */
2573 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302574 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002575
2576 /* Neither local nor remote side had no-bonding as requirement */
2577 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302578 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002579
2580 /* Local side had dedicated bonding as requirement */
2581 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302582 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002583
2584 /* Remote side had dedicated bonding as requirement */
2585 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302586 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002587
2588 /* If none of the above criteria match, then don't store the key
2589 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302590 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002591}
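/* Summary of the decision above (not part of the original file):
 *
 *	key type < 0x03 (legacy)		-> store
 *	HCI_LK_DEBUG_COMBINATION		-> never store
 *	changed combination, no previous key	-> don't store
 *	no connection (security mode 3)		-> store
 *	neither side requested no-bonding	-> store
 *	either side used dedicated bonding	-> store
 *	otherwise				-> don't store
 */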
2592
Johan Hedberg98a0b842014-01-30 19:40:00 -08002593static bool ltk_type_master(u8 type)
2594{
2595 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2596 return true;
2597
2598 return false;
2599}
2600
2601struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2602 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002603{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002604 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002605
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002606 list_for_each_entry(k, &hdev->long_term_keys, list) {
2607 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002608 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002609 continue;
2610
Johan Hedberg98a0b842014-01-30 19:40:00 -08002611 if (ltk_type_master(k->type) != master)
2612 continue;
2613
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002614 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002615 }
2616
2617 return NULL;
2618}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002619
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002620struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002621 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002622{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002623 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002624
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002625 list_for_each_entry(k, &hdev->long_term_keys, list)
2626 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002627 bacmp(bdaddr, &k->bdaddr) == 0 &&
2628 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002629 return k;
2630
2631 return NULL;
2632}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002633
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002634int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002635 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002636{
2637 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302638 u8 old_key_type;
2639 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002640
2641 old_key = hci_find_link_key(hdev, bdaddr);
2642 if (old_key) {
2643 old_key_type = old_key->type;
2644 key = old_key;
2645 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002646 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002647 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2648 if (!key)
2649 return -ENOMEM;
2650 list_add(&key->list, &hdev->link_keys);
2651 }
2652
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002653 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002654
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002655 /* Some buggy controller combinations generate a changed
2656 * combination key for legacy pairing even when there's no
2657 * previous key */
2658 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002659 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002660 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002661 if (conn)
2662 conn->key_type = type;
2663 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002664
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002665 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002666 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002667 key->pin_len = pin_len;
2668
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002669 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002670 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002671 else
2672 key->type = type;
2673
Johan Hedberg4df378a2011-04-28 11:29:03 -07002674 if (!new_key)
2675 return 0;
2676
2677 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2678
Johan Hedberg744cf192011-11-08 20:40:14 +02002679 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002680
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302681 if (conn)
2682 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002683
2684 return 0;
2685}
2686
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002687int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002688		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002689		__le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002690{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002691 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002692 bool master = ltk_type_master(type);
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002693 u8 persistent;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002694
Johan Hedberg98a0b842014-01-30 19:40:00 -08002695 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002696 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002697 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002698 else {
2699 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002700 if (!key)
2701 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002702 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002703 }
2704
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002705 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002706 key->bdaddr_type = addr_type;
2707 memcpy(key->val, tk, sizeof(key->val));
2708 key->authenticated = authenticated;
2709 key->ediv = ediv;
2710 key->enc_size = enc_size;
2711 key->type = type;
2712 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002713
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002714 if (!new_key)
2715 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002716
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002717 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2718 persistent = 0;
2719 else
2720 persistent = 1;
2721
Johan Hedberg21b93b72014-01-30 19:39:58 -08002722 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002723 mgmt_new_ltk(hdev, key, persistent);
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002724
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002725 return 0;
2726}
2727
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002728int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2729{
2730 struct link_key *key;
2731
2732 key = hci_find_link_key(hdev, bdaddr);
2733 if (!key)
2734 return -ENOENT;
2735
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002736 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002737
2738 list_del(&key->list);
2739 kfree(key);
2740
2741 return 0;
2742}
2743
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002744int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2745{
2746 struct smp_ltk *k, *tmp;
2747
2748 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2749 if (bacmp(bdaddr, &k->bdaddr))
2750 continue;
2751
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002752 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002753
2754 list_del(&k->list);
2755 kfree(k);
2756 }
2757
2758 return 0;
2759}
2760
Ville Tervo6bd32322011-02-16 16:32:41 +02002761/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002762static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002763{
2764 struct hci_dev *hdev = (void *) arg;
2765
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002766 if (hdev->sent_cmd) {
2767 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2768 u16 opcode = __le16_to_cpu(sent->opcode);
2769
2770 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2771 } else {
2772 BT_ERR("%s command tx timeout", hdev->name);
2773 }
2774
Ville Tervo6bd32322011-02-16 16:32:41 +02002775 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002776 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002777}
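/* Illustrative note (not part of the original file): the timer fired
 * here is armed whenever a command is handed to the driver, roughly:
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * in the command work path, and deleted again once the matching command
 * complete/status event arrives, so it only expires when the controller
 * stops responding.
 */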

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
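/* Hedged usage sketch (not taken verbatim from this tree): the mgmt layer
 * stores out-of-band pairing material per remote address while holding the
 * device lock, roughly:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
 *				      cp->hash, cp->randomizer);
 *	hci_dev_unlock(hdev);
 *
 * Note that this 192-bit variant deliberately zeroes the 256-bit (Secure
 * Connections) fields so stale data cannot leak into later pairings.
 */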

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
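/* Hedged usage sketch: callers pass a concrete address type and treat
 * BDADDR_ANY as "flush everything", mirroring hci_blacklist_del() above:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 *
 * BDADDR_BREDR is assumed to be the 0x00 address-type constant used
 * elsewhere in this stack; LE callers pass their LE address type instead.
 */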

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}
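/* Hedged usage sketch for the hci_conn_params_* helpers above; every call
 * must be made under hdev->lock, and the interval values shown
 * (0x0028/0x0038, i.e. 50/70 ms in 1.25 ms units) are illustrative
 * defaults only:
 *
 *	hci_dev_lock(hdev);
 *	hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 */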

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
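/* The three routines above form one asynchronous chain: the delayed work
 * queues LE_SET_SCAN_ENABLE(disable), its completion callback decides
 * whether discovery is finished or must continue as a classic inquiry,
 * and inquiry_complete() only has to handle the failure case. A hedged
 * sketch of how the chain is typically armed (timeout value assumed):
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   msecs_to_jiffies(timeout));
 */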
3088
David Herrmann9be0dab2012-04-22 14:39:57 +02003089/* Alloc HCI device */
3090struct hci_dev *hci_alloc_dev(void)
3091{
3092 struct hci_dev *hdev;
3093
3094 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3095 if (!hdev)
3096 return NULL;
3097
David Herrmannb1b813d2012-04-22 14:39:58 +02003098 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3099 hdev->esco_type = (ESCO_HV1);
3100 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003101 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3102 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003103 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3104 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003105
David Herrmannb1b813d2012-04-22 14:39:58 +02003106 hdev->sniff_max_interval = 800;
3107 hdev->sniff_min_interval = 80;
3108
Marcel Holtmannbef64732013-10-11 08:23:19 -07003109 hdev->le_scan_interval = 0x0060;
3110 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003111 hdev->le_conn_min_interval = 0x0028;
3112 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003113
David Herrmannb1b813d2012-04-22 14:39:58 +02003114 mutex_init(&hdev->lock);
3115 mutex_init(&hdev->req_lock);
3116
3117 INIT_LIST_HEAD(&hdev->mgmt_pending);
3118 INIT_LIST_HEAD(&hdev->blacklist);
3119 INIT_LIST_HEAD(&hdev->uuids);
3120 INIT_LIST_HEAD(&hdev->link_keys);
3121 INIT_LIST_HEAD(&hdev->long_term_keys);
3122 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003123 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003124 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003125
3126 INIT_WORK(&hdev->rx_work, hci_rx_work);
3127 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3128 INIT_WORK(&hdev->tx_work, hci_tx_work);
3129 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003130
David Herrmannb1b813d2012-04-22 14:39:58 +02003131 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3132 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3133 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3134
David Herrmannb1b813d2012-04-22 14:39:58 +02003135 skb_queue_head_init(&hdev->rx_q);
3136 skb_queue_head_init(&hdev->cmd_q);
3137 skb_queue_head_init(&hdev->raw_q);
3138
3139 init_waitqueue_head(&hdev->req_wait_q);
3140
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003141 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003142
David Herrmannb1b813d2012-04-22 14:39:58 +02003143 hci_init_sysfs(hdev);
3144 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003145
3146 return hdev;
3147}
3148EXPORT_SYMBOL(hci_alloc_dev);
3149
3150/* Free HCI device */
3151void hci_free_dev(struct hci_dev *hdev)
3152{
David Herrmann9be0dab2012-04-22 14:39:57 +02003153 /* will free via device release */
3154 put_device(&hdev->dev);
3155}
3156EXPORT_SYMBOL(hci_free_dev);
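/* Hedged driver-side sketch of the alloc/register lifecycle (the
 * my_open/my_close/my_send callbacks are hypothetical placeholders):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On remove, the same driver calls hci_unregister_dev() followed by
 * hci_free_dev().
 */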

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
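/* Hedged driver-side sketch: a transport driver tags each inbound skb with
 * its packet type before handing it over (where the skb comes from is up
 * to the transport and is a placeholder here):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * The frame is only queued at this point; hci_rx_work() does the actual
 * demultiplexing in process context.
 */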

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
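/* Hedged usage sketch: UART-style drivers that receive a known packet type
 * in arbitrarily sized chunks can feed each chunk straight in (buf/len come
 * from the transport and are placeholders):
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *
 * Internally the per-type reassembly slot (index "type - 1") accumulates
 * bytes until a full frame is seen, then forwards it via hci_recv_frame().
 */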

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
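/* Hedged sketch of an hci_cb user: an upper protocol (L2CAP-style)
 * registers a static callback table once at module init; the member names
 * shown are assumptions about struct hci_cb in this tree:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_proto_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *
 * and removes it with hci_unregister_cb() on module exit.
 */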

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
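/* Usage sketch of the request API above, mirroring what
 * le_scan_disable_work() does earlier in this file (my_complete is a
 * placeholder hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * All queued commands run back to back; my_complete() fires once with the
 * status of the last one.
 */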

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
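/* Example (hedged): sending a parameterless controller reset through the
 * one-shot path, without building a full request:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * Commands with parameters pass a packed struct instead, e.g. a struct
 * hci_cp_inquiry for HCI_OP_INQUIRY as used earlier in this file.
 */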

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
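/* Worked example (hedged on the exact macro definition): assuming
 * hci_handle_pack(h, f) == ((h & 0x0fff) | (f << 12)), a connection handle
 * of 0x0001 with ACL_START flags (0x02) yields an on-the-wire handle field
 * of 0x2001, i.e. the packet-boundary/broadcast flags live in the top four
 * bits:
 *
 *	hci_handle_pack(0x0001, ACL_START) == 0x2001
 */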

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
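/* Worked example of the quota computed above: with hdev->acl_cnt == 8 free
 * controller buffers and num == 3 ACL connections holding queued data, each
 * scheduling round grants q = 8 / 3 = 2 packets to the least-recently-served
 * connection; a zero quotient is rounded up to 1 so a busy link can never be
 * starved completely.
 */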

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
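/* Worked example: with hdev->block_len == 64 (an illustrative value) and a
 * 150-byte skb (HCI_ACL_HDR_SIZE == 4 bytes of header, 146 bytes of
 * payload), DIV_ROUND_UP(146, 64) charges 3 controller buffer blocks.
 */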
4035
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004036static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 if (!test_bit(HCI_RAW, &hdev->flags)) {
4039 /* ACL tx timeout must be longer than maximum
4040 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004041 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004042 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004043 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004045}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004047static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004048{
4049 unsigned int cnt = hdev->acl_cnt;
4050 struct hci_chan *chan;
4051 struct sk_buff *skb;
4052 int quote;
4053
4054 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004055
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004056 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004057 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004058 u32 priority = (skb_peek(&chan->data_q))->priority;
4059 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004060 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004061 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004062
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004063 /* Stop if priority has changed */
4064 if (skb->priority < priority)
4065 break;
4066
4067 skb = skb_dequeue(&chan->data_q);
4068
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004069 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004070 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004071
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004072 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 hdev->acl_last_tx = jiffies;
4074
4075 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004076 chan->sent++;
4077 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 }
4079 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004080
4081 if (cnt != hdev->acl_cnt)
4082 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083}
4084
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004085static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004086{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004087 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004088 struct hci_chan *chan;
4089 struct sk_buff *skb;
4090 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004091 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004092
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004093 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004094
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004095 BT_DBG("%s", hdev->name);
4096
4097 if (hdev->dev_type == HCI_AMP)
4098 type = AMP_LINK;
4099 else
4100 type = ACL_LINK;
4101
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004102 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004103 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004104 u32 priority = (skb_peek(&chan->data_q))->priority;
4105 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4106 int blocks;
4107
4108 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004109 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004110
4111 /* Stop if priority has changed */
4112 if (skb->priority < priority)
4113 break;
4114
4115 skb = skb_dequeue(&chan->data_q);
4116
4117 blocks = __get_blocks(hdev, skb);
4118 if (blocks > hdev->block_cnt)
4119 return;
4120
4121 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004122 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004123
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004124 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004125 hdev->acl_last_tx = jiffies;
4126
4127 hdev->block_cnt -= blocks;
4128 quote -= blocks;
4129
4130 chan->sent += blocks;
4131 chan->conn->sent += blocks;
4132 }
4133 }
4134
4135 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004136 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004137}
4138
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004139static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004140{
4141 BT_DBG("%s", hdev->name);
4142
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004143 /* No ACL link over BR/EDR controller */
4144 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4145 return;
4146
4147 /* No AMP link over AMP controller */
4148 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004149 return;
4150
4151 switch (hdev->flow_ctl_mode) {
4152 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4153 hci_sched_acl_pkt(hdev);
4154 break;
4155
4156 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4157 hci_sched_acl_blk(hdev);
4158 break;
4159 }
4160}
4161
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004163static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164{
4165 struct hci_conn *conn;
4166 struct sk_buff *skb;
4167 int quote;
4168
4169 BT_DBG("%s", hdev->name);
4170
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004171 if (!hci_conn_num(hdev, SCO_LINK))
4172 return;
4173
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4175 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4176 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004177 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178
4179 conn->sent++;
4180 if (conn->sent == ~0)
4181 conn->sent = 0;
4182 }
4183 }
4184}
4185
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004186static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004187{
4188 struct hci_conn *conn;
4189 struct sk_buff *skb;
4190 int quote;
4191
4192 BT_DBG("%s", hdev->name);
4193
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004194 if (!hci_conn_num(hdev, ESCO_LINK))
4195 return;
4196
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004197 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4198 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004199 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4200 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004201 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004202
4203 conn->sent++;
4204 if (conn->sent == ~0)
4205 conn->sent = 0;
4206 }
4207 }
4208}
4209
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

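/* TX work: run every per-link-type scheduler and then flush the raw
 * queue. Scheduling is skipped while a user channel owns the device,
 * since the channel owner drives the controller directly.
 */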
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

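/* A request is a batch of commands queued together; the first command
 * of each request is flagged with req.start. The current request is
 * therefore complete once the head of cmd_q starts a new request (or
 * the queue is empty).
 */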
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

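/* Re-queue a clone of the last sent command so it gets retried. This
 * is the recovery path for controllers that signal a spontaneous reset
 * during init (see hci_req_cmd_complete() below); HCI_OP_RESET is
 * deliberately never resent.
 */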
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

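/* Called on command complete/status events: decide whether the request
 * owning the completed command has finished and, if so, run its
 * complete callback exactly once. On failure, the remaining queued
 * commands of the request are dropped.
 */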
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

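/* RX work: drain rx_q, mirroring each frame to the monitor socket (and
 * to raw sockets in promiscuous mode) before dispatching it by packet
 * type. Data packets are dropped while the controller is still being
 * initialized.
 */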
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

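/* CMD work: send one queued command whenever the controller has credit
 * (cmd_cnt). A clone is kept in hdev->sent_cmd for the completion
 * path, and cmd_timer watches for commands that never complete.
 */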
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}