blob: df25af5502ef9593b9ab52db477266ba8b8e81c4 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID numbering */
static DEFINE_IDA(hci_index_ida);
52
Linus Torvalds1da177e2005-04-16 15:20:36 -070053/* ---- HCI notifications ---- */
54
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070060/* ---- HCI debugfs entries ---- */
61
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070062static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
68 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
74static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76{
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
79 char buf[32];
80 size_t buf_size = min(count, (sizeof(buf)-1));
81 bool enable;
82 int err;
83
84 if (!test_bit(HCI_UP, &hdev->flags))
85 return -ENETDOWN;
86
87 if (copy_from_user(buf, user_buf, buf_size))
88 return -EFAULT;
89
90 buf[buf_size] = '\0';
91 if (strtobool(buf, &enable))
92 return -EINVAL;
93
94 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
95 return -EALREADY;
96
97 hci_req_lock(hdev);
98 if (enable)
99 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
100 HCI_CMD_TIMEOUT);
101 else
102 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 hci_req_unlock(hdev);
105
106 if (IS_ERR(skb))
107 return PTR_ERR(skb);
108
109 err = -bt_to_errno(skb->data[0]);
110 kfree_skb(skb);
111
112 if (err < 0)
113 return err;
114
115 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
116
117 return count;
118}
119
120static const struct file_operations dut_mode_fops = {
121 .open = simple_open,
122 .read = dut_mode_read,
123 .write = dut_mode_write,
124 .llseek = default_llseek,
125};
126
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700127static int features_show(struct seq_file *f, void *ptr)
128{
129 struct hci_dev *hdev = f->private;
130 u8 p;
131
132 hci_dev_lock(hdev);
133 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700134 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700135 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
136 hdev->features[p][0], hdev->features[p][1],
137 hdev->features[p][2], hdev->features[p][3],
138 hdev->features[p][4], hdev->features[p][5],
139 hdev->features[p][6], hdev->features[p][7]);
140 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700141 if (lmp_le_capable(hdev))
142 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
143 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
144 hdev->le_features[0], hdev->le_features[1],
145 hdev->le_features[2], hdev->le_features[3],
146 hdev->le_features[4], hdev->le_features[5],
147 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 hci_dev_unlock(hdev);
149
150 return 0;
151}
152
153static int features_open(struct inode *inode, struct file *file)
154{
155 return single_open(file, features_show, inode->i_private);
156}
157
158static const struct file_operations features_fops = {
159 .open = features_open,
160 .read = seq_read,
161 .llseek = seq_lseek,
162 .release = single_release,
163};
164
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700165static int blacklist_show(struct seq_file *f, void *p)
166{
167 struct hci_dev *hdev = f->private;
168 struct bdaddr_list *b;
169
170 hci_dev_lock(hdev);
171 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700172 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700173 hci_dev_unlock(hdev);
174
175 return 0;
176}
177
178static int blacklist_open(struct inode *inode, struct file *file)
179{
180 return single_open(file, blacklist_show, inode->i_private);
181}
182
183static const struct file_operations blacklist_fops = {
184 .open = blacklist_open,
185 .read = seq_read,
186 .llseek = seq_lseek,
187 .release = single_release,
188};
189
Marcel Holtmann47219832013-10-17 17:24:15 -0700190static int uuids_show(struct seq_file *f, void *p)
191{
192 struct hci_dev *hdev = f->private;
193 struct bt_uuid *uuid;
194
195 hci_dev_lock(hdev);
196 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700197 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700198
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700199 /* The Bluetooth UUID values are stored in big endian,
200 * but with reversed byte order. So convert them into
201 * the right order for the %pUb modifier.
202 */
203 for (i = 0; i < 16; i++)
204 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700205
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700206 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700207 }
208 hci_dev_unlock(hdev);
209
210 return 0;
211}
212
213static int uuids_open(struct inode *inode, struct file *file)
214{
215 return single_open(file, uuids_show, inode->i_private);
216}
217
218static const struct file_operations uuids_fops = {
219 .open = uuids_open,
220 .read = seq_read,
221 .llseek = seq_lseek,
222 .release = single_release,
223};
224
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700225static int inquiry_cache_show(struct seq_file *f, void *p)
226{
227 struct hci_dev *hdev = f->private;
228 struct discovery_state *cache = &hdev->discovery;
229 struct inquiry_entry *e;
230
231 hci_dev_lock(hdev);
232
233 list_for_each_entry(e, &cache->all, all) {
234 struct inquiry_data *data = &e->data;
235 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
236 &data->bdaddr,
237 data->pscan_rep_mode, data->pscan_period_mode,
238 data->pscan_mode, data->dev_class[2],
239 data->dev_class[1], data->dev_class[0],
240 __le16_to_cpu(data->clock_offset),
241 data->rssi, data->ssp_mode, e->timestamp);
242 }
243
244 hci_dev_unlock(hdev);
245
246 return 0;
247}
248
249static int inquiry_cache_open(struct inode *inode, struct file *file)
250{
251 return single_open(file, inquiry_cache_show, inode->i_private);
252}
253
254static const struct file_operations inquiry_cache_fops = {
255 .open = inquiry_cache_open,
256 .read = seq_read,
257 .llseek = seq_lseek,
258 .release = single_release,
259};
260
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700261static int link_keys_show(struct seq_file *f, void *ptr)
262{
263 struct hci_dev *hdev = f->private;
264 struct list_head *p, *n;
265
266 hci_dev_lock(hdev);
267 list_for_each_safe(p, n, &hdev->link_keys) {
268 struct link_key *key = list_entry(p, struct link_key, list);
269 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
270 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
271 }
272 hci_dev_unlock(hdev);
273
274 return 0;
275}
276
277static int link_keys_open(struct inode *inode, struct file *file)
278{
279 return single_open(file, link_keys_show, inode->i_private);
280}
281
282static const struct file_operations link_keys_fops = {
283 .open = link_keys_open,
284 .read = seq_read,
285 .llseek = seq_lseek,
286 .release = single_release,
287};
288
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700289static int dev_class_show(struct seq_file *f, void *ptr)
290{
291 struct hci_dev *hdev = f->private;
292
293 hci_dev_lock(hdev);
294 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
295 hdev->dev_class[1], hdev->dev_class[0]);
296 hci_dev_unlock(hdev);
297
298 return 0;
299}
300
301static int dev_class_open(struct inode *inode, struct file *file)
302{
303 return single_open(file, dev_class_show, inode->i_private);
304}
305
306static const struct file_operations dev_class_fops = {
307 .open = dev_class_open,
308 .read = seq_read,
309 .llseek = seq_lseek,
310 .release = single_release,
311};
312
Marcel Holtmann041000b2013-10-17 12:02:31 -0700313static int voice_setting_get(void *data, u64 *val)
314{
315 struct hci_dev *hdev = data;
316
317 hci_dev_lock(hdev);
318 *val = hdev->voice_setting;
319 hci_dev_unlock(hdev);
320
321 return 0;
322}
323
324DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
325 NULL, "0x%4.4llx\n");
326
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700327static int auto_accept_delay_set(void *data, u64 val)
328{
329 struct hci_dev *hdev = data;
330
331 hci_dev_lock(hdev);
332 hdev->auto_accept_delay = val;
333 hci_dev_unlock(hdev);
334
335 return 0;
336}
337
338static int auto_accept_delay_get(void *data, u64 *val)
339{
340 struct hci_dev *hdev = data;
341
342 hci_dev_lock(hdev);
343 *val = hdev->auto_accept_delay;
344 hci_dev_unlock(hdev);
345
346 return 0;
347}
348
349DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
350 auto_accept_delay_set, "%llu\n");
351
Marcel Holtmann06f5b772013-10-19 07:09:11 -0700352static int ssp_debug_mode_set(void *data, u64 val)
353{
354 struct hci_dev *hdev = data;
355 struct sk_buff *skb;
356 __u8 mode;
357 int err;
358
359 if (val != 0 && val != 1)
360 return -EINVAL;
361
362 if (!test_bit(HCI_UP, &hdev->flags))
363 return -ENETDOWN;
364
365 hci_req_lock(hdev);
366 mode = val;
367 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
368 &mode, HCI_CMD_TIMEOUT);
369 hci_req_unlock(hdev);
370
371 if (IS_ERR(skb))
372 return PTR_ERR(skb);
373
374 err = -bt_to_errno(skb->data[0]);
375 kfree_skb(skb);
376
377 if (err < 0)
378 return err;
379
380 hci_dev_lock(hdev);
381 hdev->ssp_debug_mode = val;
382 hci_dev_unlock(hdev);
383
384 return 0;
385}
386
387static int ssp_debug_mode_get(void *data, u64 *val)
388{
389 struct hci_dev *hdev = data;
390
391 hci_dev_lock(hdev);
392 *val = hdev->ssp_debug_mode;
393 hci_dev_unlock(hdev);
394
395 return 0;
396}
397
398DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
399 ssp_debug_mode_set, "%llu\n");
400
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800401static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos)
403{
404 struct hci_dev *hdev = file->private_data;
405 char buf[3];
406
407 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
408 buf[1] = '\n';
409 buf[2] = '\0';
410 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
411}
412
413static ssize_t force_sc_support_write(struct file *file,
414 const char __user *user_buf,
415 size_t count, loff_t *ppos)
416{
417 struct hci_dev *hdev = file->private_data;
418 char buf[32];
419 size_t buf_size = min(count, (sizeof(buf)-1));
420 bool enable;
421
422 if (test_bit(HCI_UP, &hdev->flags))
423 return -EBUSY;
424
425 if (copy_from_user(buf, user_buf, buf_size))
426 return -EFAULT;
427
428 buf[buf_size] = '\0';
429 if (strtobool(buf, &enable))
430 return -EINVAL;
431
432 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
433 return -EALREADY;
434
435 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
436
437 return count;
438}
439
440static const struct file_operations force_sc_support_fops = {
441 .open = simple_open,
442 .read = force_sc_support_read,
443 .write = force_sc_support_write,
444 .llseek = default_llseek,
445};
446
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800447static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
448 size_t count, loff_t *ppos)
449{
450 struct hci_dev *hdev = file->private_data;
451 char buf[3];
452
453 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
454 buf[1] = '\n';
455 buf[2] = '\0';
456 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
457}
458
459static const struct file_operations sc_only_mode_fops = {
460 .open = simple_open,
461 .read = sc_only_mode_read,
462 .llseek = default_llseek,
463};
464
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700465static int idle_timeout_set(void *data, u64 val)
466{
467 struct hci_dev *hdev = data;
468
469 if (val != 0 && (val < 500 || val > 3600000))
470 return -EINVAL;
471
472 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700473 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700474 hci_dev_unlock(hdev);
475
476 return 0;
477}
478
479static int idle_timeout_get(void *data, u64 *val)
480{
481 struct hci_dev *hdev = data;
482
483 hci_dev_lock(hdev);
484 *val = hdev->idle_timeout;
485 hci_dev_unlock(hdev);
486
487 return 0;
488}
489
490DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
491 idle_timeout_set, "%llu\n");
492
493static int sniff_min_interval_set(void *data, u64 val)
494{
495 struct hci_dev *hdev = data;
496
497 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
498 return -EINVAL;
499
500 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700501 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700502 hci_dev_unlock(hdev);
503
504 return 0;
505}
506
507static int sniff_min_interval_get(void *data, u64 *val)
508{
509 struct hci_dev *hdev = data;
510
511 hci_dev_lock(hdev);
512 *val = hdev->sniff_min_interval;
513 hci_dev_unlock(hdev);
514
515 return 0;
516}
517
518DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
519 sniff_min_interval_set, "%llu\n");
520
521static int sniff_max_interval_set(void *data, u64 val)
522{
523 struct hci_dev *hdev = data;
524
525 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
526 return -EINVAL;
527
528 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700529 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700530 hci_dev_unlock(hdev);
531
532 return 0;
533}
534
535static int sniff_max_interval_get(void *data, u64 *val)
536{
537 struct hci_dev *hdev = data;
538
539 hci_dev_lock(hdev);
540 *val = hdev->sniff_max_interval;
541 hci_dev_unlock(hdev);
542
543 return 0;
544}
545
546DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
547 sniff_max_interval_set, "%llu\n");
548
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700549static int static_address_show(struct seq_file *f, void *p)
550{
551 struct hci_dev *hdev = f->private;
552
553 hci_dev_lock(hdev);
554 seq_printf(f, "%pMR\n", &hdev->static_addr);
555 hci_dev_unlock(hdev);
556
557 return 0;
558}
559
560static int static_address_open(struct inode *inode, struct file *file)
561{
562 return single_open(file, static_address_show, inode->i_private);
563}
564
565static const struct file_operations static_address_fops = {
566 .open = static_address_open,
567 .read = seq_read,
568 .llseek = seq_lseek,
569 .release = single_release,
570};
571
Marcel Holtmann92202182013-10-18 16:38:10 -0700572static int own_address_type_set(void *data, u64 val)
573{
574 struct hci_dev *hdev = data;
575
576 if (val != 0 && val != 1)
577 return -EINVAL;
578
579 hci_dev_lock(hdev);
580 hdev->own_addr_type = val;
581 hci_dev_unlock(hdev);
582
583 return 0;
584}
585
586static int own_address_type_get(void *data, u64 *val)
587{
588 struct hci_dev *hdev = data;
589
590 hci_dev_lock(hdev);
591 *val = hdev->own_addr_type;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
598 own_address_type_set, "%llu\n");
599
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700600static int long_term_keys_show(struct seq_file *f, void *ptr)
601{
602 struct hci_dev *hdev = f->private;
603 struct list_head *p, *n;
604
605 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800606 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700607 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800608 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700609 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
610 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
611 8, ltk->rand, 16, ltk->val);
612 }
613 hci_dev_unlock(hdev);
614
615 return 0;
616}
617
618static int long_term_keys_open(struct inode *inode, struct file *file)
619{
620 return single_open(file, long_term_keys_show, inode->i_private);
621}
622
623static const struct file_operations long_term_keys_fops = {
624 .open = long_term_keys_open,
625 .read = seq_read,
626 .llseek = seq_lseek,
627 .release = single_release,
628};
629
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700630static int conn_min_interval_set(void *data, u64 val)
631{
632 struct hci_dev *hdev = data;
633
634 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
635 return -EINVAL;
636
637 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700638 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700639 hci_dev_unlock(hdev);
640
641 return 0;
642}
643
644static int conn_min_interval_get(void *data, u64 *val)
645{
646 struct hci_dev *hdev = data;
647
648 hci_dev_lock(hdev);
649 *val = hdev->le_conn_min_interval;
650 hci_dev_unlock(hdev);
651
652 return 0;
653}
654
655DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
656 conn_min_interval_set, "%llu\n");
657
658static int conn_max_interval_set(void *data, u64 val)
659{
660 struct hci_dev *hdev = data;
661
662 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
663 return -EINVAL;
664
665 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700666 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700667 hci_dev_unlock(hdev);
668
669 return 0;
670}
671
672static int conn_max_interval_get(void *data, u64 *val)
673{
674 struct hci_dev *hdev = data;
675
676 hci_dev_lock(hdev);
677 *val = hdev->le_conn_max_interval;
678 hci_dev_unlock(hdev);
679
680 return 0;
681}
682
683DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
684 conn_max_interval_set, "%llu\n");
685
Jukka Rissanen89863102013-12-11 17:05:38 +0200686static ssize_t lowpan_read(struct file *file, char __user *user_buf,
687 size_t count, loff_t *ppos)
688{
689 struct hci_dev *hdev = file->private_data;
690 char buf[3];
691
692 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
693 buf[1] = '\n';
694 buf[2] = '\0';
695 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
696}
697
698static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
699 size_t count, loff_t *position)
700{
701 struct hci_dev *hdev = fp->private_data;
702 bool enable;
703 char buf[32];
704 size_t buf_size = min(count, (sizeof(buf)-1));
705
706 if (copy_from_user(buf, user_buffer, buf_size))
707 return -EFAULT;
708
709 buf[buf_size] = '\0';
710
711 if (strtobool(buf, &enable) < 0)
712 return -EINVAL;
713
714 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
715 return -EALREADY;
716
717 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
718
719 return count;
720}
721
722static const struct file_operations lowpan_debugfs_fops = {
723 .open = simple_open,
724 .read = lowpan_read,
725 .write = lowpan_write,
726 .llseek = default_llseek,
727};
728
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729/* ---- HCI requests ---- */
730
Johan Hedberg42c6b122013-03-05 20:37:49 +0200731static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200733 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734
735 if (hdev->req_status == HCI_REQ_PEND) {
736 hdev->req_result = result;
737 hdev->req_status = HCI_REQ_DONE;
738 wake_up_interruptible(&hdev->req_wait_q);
739 }
740}
741
742static void hci_req_cancel(struct hci_dev *hdev, int err)
743{
744 BT_DBG("%s err 0x%2.2x", hdev->name, err);
745
746 if (hdev->req_status == HCI_REQ_PEND) {
747 hdev->req_result = err;
748 hdev->req_status = HCI_REQ_CANCELED;
749 wake_up_interruptible(&hdev->req_wait_q);
750 }
751}
752
Fengguang Wu77a63e02013-04-20 16:24:31 +0300753static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
754 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300755{
756 struct hci_ev_cmd_complete *ev;
757 struct hci_event_hdr *hdr;
758 struct sk_buff *skb;
759
760 hci_dev_lock(hdev);
761
762 skb = hdev->recv_evt;
763 hdev->recv_evt = NULL;
764
765 hci_dev_unlock(hdev);
766
767 if (!skb)
768 return ERR_PTR(-ENODATA);
769
770 if (skb->len < sizeof(*hdr)) {
771 BT_ERR("Too short HCI event");
772 goto failed;
773 }
774
775 hdr = (void *) skb->data;
776 skb_pull(skb, HCI_EVENT_HDR_SIZE);
777
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300778 if (event) {
779 if (hdr->evt != event)
780 goto failed;
781 return skb;
782 }
783
Johan Hedberg75e84b72013-04-02 13:35:04 +0300784 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
785 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
786 goto failed;
787 }
788
789 if (skb->len < sizeof(*ev)) {
790 BT_ERR("Too short cmd_complete event");
791 goto failed;
792 }
793
794 ev = (void *) skb->data;
795 skb_pull(skb, sizeof(*ev));
796
797 if (opcode == __le16_to_cpu(ev->opcode))
798 return skb;
799
800 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
801 __le16_to_cpu(ev->opcode));
802
803failed:
804 kfree_skb(skb);
805 return ERR_PTR(-ENODATA);
806}
807
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300808struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300809 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300810{
811 DECLARE_WAITQUEUE(wait, current);
812 struct hci_request req;
813 int err = 0;
814
815 BT_DBG("%s", hdev->name);
816
817 hci_req_init(&req, hdev);
818
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300819 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300820
821 hdev->req_status = HCI_REQ_PEND;
822
823 err = hci_req_run(&req, hci_req_sync_complete);
824 if (err < 0)
825 return ERR_PTR(err);
826
827 add_wait_queue(&hdev->req_wait_q, &wait);
828 set_current_state(TASK_INTERRUPTIBLE);
829
830 schedule_timeout(timeout);
831
832 remove_wait_queue(&hdev->req_wait_q, &wait);
833
834 if (signal_pending(current))
835 return ERR_PTR(-EINTR);
836
837 switch (hdev->req_status) {
838 case HCI_REQ_DONE:
839 err = -bt_to_errno(hdev->req_result);
840 break;
841
842 case HCI_REQ_CANCELED:
843 err = -hdev->req_result;
844 break;
845
846 default:
847 err = -ETIMEDOUT;
848 break;
849 }
850
851 hdev->req_status = hdev->req_result = 0;
852
853 BT_DBG("%s end: err %d", hdev->name, err);
854
855 if (err < 0)
856 return ERR_PTR(err);
857
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300858 return hci_get_cmd_complete(hdev, opcode, event);
859}
860EXPORT_SYMBOL(__hci_cmd_sync_ev);
861
862struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300863 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300864{
865 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300866}
867EXPORT_SYMBOL(__hci_cmd_sync);
868
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200870static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200871 void (*func)(struct hci_request *req,
872 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200873 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200875 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876 DECLARE_WAITQUEUE(wait, current);
877 int err = 0;
878
879 BT_DBG("%s start", hdev->name);
880
Johan Hedberg42c6b122013-03-05 20:37:49 +0200881 hci_req_init(&req, hdev);
882
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 hdev->req_status = HCI_REQ_PEND;
884
Johan Hedberg42c6b122013-03-05 20:37:49 +0200885 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200886
Johan Hedberg42c6b122013-03-05 20:37:49 +0200887 err = hci_req_run(&req, hci_req_sync_complete);
888 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200889 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300890
891 /* ENODATA means the HCI request command queue is empty.
892 * This can happen when a request with conditionals doesn't
893 * trigger any commands to be sent. This is normal behavior
894 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200895 */
Andre Guedes920c8302013-03-08 11:20:15 -0300896 if (err == -ENODATA)
897 return 0;
898
899 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200900 }
901
Andre Guedesbc4445c2013-03-08 11:20:13 -0300902 add_wait_queue(&hdev->req_wait_q, &wait);
903 set_current_state(TASK_INTERRUPTIBLE);
904
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 schedule_timeout(timeout);
906
907 remove_wait_queue(&hdev->req_wait_q, &wait);
908
909 if (signal_pending(current))
910 return -EINTR;
911
912 switch (hdev->req_status) {
913 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700914 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915 break;
916
917 case HCI_REQ_CANCELED:
918 err = -hdev->req_result;
919 break;
920
921 default:
922 err = -ETIMEDOUT;
923 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700924 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925
Johan Hedberga5040ef2011-01-10 13:28:59 +0200926 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927
928 BT_DBG("%s end: err %d", hdev->name, err);
929
930 return err;
931}
932
Johan Hedberg01178cd2013-03-05 20:37:41 +0200933static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200934 void (*req)(struct hci_request *req,
935 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200936 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937{
938 int ret;
939
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200940 if (!test_bit(HCI_UP, &hdev->flags))
941 return -ENETDOWN;
942
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 /* Serialize all requests */
944 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200945 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 hci_req_unlock(hdev);
947
948 return ret;
949}
950
Johan Hedberg42c6b122013-03-05 20:37:49 +0200951static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200953 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954
955 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200956 set_bit(HCI_RESET, &req->hdev->flags);
957 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958}
959
Johan Hedberg42c6b122013-03-05 20:37:49 +0200960static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200962 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200963
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200965 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200967 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200968 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200969
970 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200971 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972}
973
/* Queue the stage-one init commands for an AMP (Alternate MAC/PHY)
 * controller. AMP controllers use block-based flow control and only
 * need a minimal set of information queries.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
999
Johan Hedberg42c6b122013-03-05 20:37:49 +02001000static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001001{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001002 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001003
1004 BT_DBG("%s %ld", hdev->name, opt);
1005
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001006 /* Reset */
1007 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001008 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001009
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001010 switch (hdev->dev_type) {
1011 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001012 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001013 break;
1014
1015 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001016 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001017 break;
1018
1019 default:
1020 BT_ERR("Unknown device type %d", hdev->dev_type);
1021 break;
1022 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001023}
1024
/* Queue the BR/EDR-specific stage-two init commands: basic controller
 * information queries plus event-filter and connection-accept-timeout
 * defaults.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1066
/* Queue the LE-specific stage-two init commands and implicitly enable
 * LE on single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1090
1091static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1092{
1093 if (lmp_ext_inq_capable(hdev))
1094 return 0x02;
1095
1096 if (lmp_inq_rssi_capable(hdev))
1097 return 0x01;
1098
1099 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1100 hdev->lmp_subver == 0x0757)
1101 return 0x01;
1102
1103 if (hdev->manufacturer == 15) {
1104 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1105 return 0x01;
1106 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1109 return 0x01;
1110 }
1111
1112 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1113 hdev->lmp_subver == 0x1805)
1114 return 0x01;
1115
1116 return 0x00;
1117}
1118
Johan Hedberg42c6b122013-03-05 20:37:49 +02001119static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001120{
1121 u8 mode;
1122
Johan Hedberg42c6b122013-03-05 20:37:49 +02001123 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001124
Johan Hedberg42c6b122013-03-05 20:37:49 +02001125 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001126}
1127
/* Build and queue the Set Event Mask command (plus the LE variant for
 * LE-capable controllers), enabling only the events matching the
 * controller's LMP feature bits.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1208
/* Stage-two init request: transport setup (BR/EDR and/or LE), event
 * mask configuration and commands gated on the LMP feature bits read
 * during stage one.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Page 1 only here; further pages are read in stage 3 */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1272
Johan Hedberg42c6b122013-03-05 20:37:49 +02001273static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001274{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001275 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001276 struct hci_cp_write_def_link_policy cp;
1277 u16 link_policy = 0;
1278
1279 if (lmp_rswitch_capable(hdev))
1280 link_policy |= HCI_LP_RSWITCH;
1281 if (lmp_hold_capable(hdev))
1282 link_policy |= HCI_LP_HOLD;
1283 if (lmp_sniff_capable(hdev))
1284 link_policy |= HCI_LP_SNIFF;
1285 if (lmp_park_capable(hdev))
1286 link_policy |= HCI_LP_PARK;
1287
1288 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001289 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001290}
1291
/* Queue a Write LE Host Supported command when the host's LE setting
 * differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command when the value actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1312
/* Build and queue the Set Event Mask Page 2 command, enabling the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1344
/* Stage-three init request: commands gated on the supported-commands
 * bitmask and extended feature pages read during the earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1401
/* Stage-four init request: features that depend on information
 * gathered in the earlier stages (supported commands, LMP features).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1423
/* Run the staged controller initialization: stage 1 for all controller
 * types, stages 2-4 for BR/EDR/LE controllers only. During the initial
 * HCI_SETUP phase this also creates the debugfs entries whose presence
 * depends on the controller's capabilities.
 *
 * Returns 0 on success or a negative error from the synchronous
 * request machinery.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every BR/EDR/LE controller */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1525
Johan Hedberg42c6b122013-03-05 20:37:49 +02001526static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527{
1528 __u8 scan = opt;
1529
Johan Hedberg42c6b122013-03-05 20:37:49 +02001530 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
1532 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001533 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534}
1535
Johan Hedberg42c6b122013-03-05 20:37:49 +02001536static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537{
1538 __u8 auth = opt;
1539
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541
1542 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001543 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544}
1545
Johan Hedberg42c6b122013-03-05 20:37:49 +02001546static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547{
1548 __u8 encrypt = opt;
1549
Johan Hedberg42c6b122013-03-05 20:37:49 +02001550 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001552 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001553 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554}
1555
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001557{
1558 __le16 policy = cpu_to_le16(opt);
1559
Johan Hedberg42c6b122013-03-05 20:37:49 +02001560 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001561
1562 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001563 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001564}
1565
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001566/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 * Device is held on return. */
1568struct hci_dev *hci_dev_get(int index)
1569{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001570 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
1572 BT_DBG("%d", index);
1573
1574 if (index < 0)
1575 return NULL;
1576
1577 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001578 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 if (d->id == index) {
1580 hdev = hci_dev_hold(d);
1581 break;
1582 }
1583 }
1584 read_unlock(&hci_dev_list_lock);
1585 return hdev;
1586}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
1588/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001589
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001590bool hci_discovery_active(struct hci_dev *hdev)
1591{
1592 struct discovery_state *discov = &hdev->discovery;
1593
Andre Guedes6fbe1952012-02-03 17:47:58 -03001594 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001595 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001596 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001597 return true;
1598
Andre Guedes6fbe1952012-02-03 17:47:58 -03001599 default:
1600 return false;
1601 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001602}
1603
Johan Hedbergff9ef572012-01-04 14:23:45 +02001604void hci_discovery_set_state(struct hci_dev *hdev, int state)
1605{
1606 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1607
1608 if (hdev->discovery.state == state)
1609 return;
1610
1611 switch (state) {
1612 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001613 if (hdev->discovery.state != DISCOVERY_STARTING)
1614 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001615 break;
1616 case DISCOVERY_STARTING:
1617 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001618 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001619 mgmt_discovering(hdev, 1);
1620 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001621 case DISCOVERY_RESOLVING:
1622 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001623 case DISCOVERY_STOPPING:
1624 break;
1625 }
1626
1627 hdev->discovery.state = state;
1628}
1629
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001630void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631{
Johan Hedberg30883512012-01-04 14:16:21 +02001632 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001633 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
Johan Hedberg561aafb2012-01-04 13:31:59 +02001635 list_for_each_entry_safe(p, n, &cache->all, all) {
1636 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001637 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001639
1640 INIT_LIST_HEAD(&cache->unknown);
1641 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642}
1643
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001644struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1645 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646{
Johan Hedberg30883512012-01-04 14:16:21 +02001647 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 struct inquiry_entry *e;
1649
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001650 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Johan Hedberg561aafb2012-01-04 13:31:59 +02001652 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001654 return e;
1655 }
1656
1657 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658}
1659
Johan Hedberg561aafb2012-01-04 13:31:59 +02001660struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001661 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001662{
Johan Hedberg30883512012-01-04 14:16:21 +02001663 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001664 struct inquiry_entry *e;
1665
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001666 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001667
1668 list_for_each_entry(e, &cache->unknown, list) {
1669 if (!bacmp(&e->data.bdaddr, bdaddr))
1670 return e;
1671 }
1672
1673 return NULL;
1674}
1675
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001676struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001677 bdaddr_t *bdaddr,
1678 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001679{
1680 struct discovery_state *cache = &hdev->discovery;
1681 struct inquiry_entry *e;
1682
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001683 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001684
1685 list_for_each_entry(e, &cache->resolve, list) {
1686 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1687 return e;
1688 if (!bacmp(&e->data.bdaddr, bdaddr))
1689 return e;
1690 }
1691
1692 return NULL;
1693}
1694
/* Re-insert @ie at its proper position in the resolve list.
 *
 * The comparison suggests the list is kept with NAME_PENDING entries
 * (resolution already in flight) toward the front, and the remaining
 * entries ordered by ascending abs(rssi), i.e. strongest signal first.
 * NOTE(review): ordering intent inferred from the comparison below —
 * confirm against the name-resolution consumer.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove first; the entry is re-added at its sorted position. */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that should precede @ie. */
	list_add(&ie->list, pos);
}
1713
/* Add or refresh an inquiry result in the discovery cache.
 *
 * @data:       inquiry result for one remote device
 * @name_known: caller already knows the remote name
 * @ssp:        optional out-parameter; set to the remote's SSP support
 *
 * Returns true when the remote name is known (no remote name request
 * needed), false otherwise — including on allocation failure.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once a device reported SSP support, keep it sticky. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for a device awaiting name resolution:
		 * re-sort it in the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and drop from the unknown/resolve list,
	 * unless a name request is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1771
1772static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1773{
Johan Hedberg30883512012-01-04 14:16:21 +02001774 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 struct inquiry_info *info = (struct inquiry_info *) buf;
1776 struct inquiry_entry *e;
1777 int copied = 0;
1778
Johan Hedberg561aafb2012-01-04 13:31:59 +02001779 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001781
1782 if (copied >= num)
1783 break;
1784
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 bacpy(&info->bdaddr, &data->bdaddr);
1786 info->pscan_rep_mode = data->pscan_rep_mode;
1787 info->pscan_period_mode = data->pscan_period_mode;
1788 info->pscan_mode = data->pscan_mode;
1789 memcpy(info->dev_class, data->dev_class, 3);
1790 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001791
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001793 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
1795
1796 BT_DBG("cache %p, copied %d", cache, copied);
1797 return copied;
1798}
1799
Johan Hedberg42c6b122013-03-05 20:37:49 +02001800static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801{
1802 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001803 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 struct hci_cp_inquiry cp;
1805
1806 BT_DBG("%s", hdev->name);
1807
1808 if (test_bit(HCI_INQUIRY, &hdev->flags))
1809 return;
1810
1811 /* Start Inquiry */
1812 memcpy(&cp.lap, &ir->lap, 3);
1813 cp.length = ir->length;
1814 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001815 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816}
1817
/* Action function for wait_on_bit() in hci_inquiry(): sleep once and
 * report whether a signal is pending (non-zero aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1823
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824int hci_inquiry(void __user *arg)
1825{
1826 __u8 __user *ptr = arg;
1827 struct hci_inquiry_req ir;
1828 struct hci_dev *hdev;
1829 int err = 0, do_inquiry = 0, max_rsp;
1830 long timeo;
1831 __u8 *buf;
1832
1833 if (copy_from_user(&ir, ptr, sizeof(ir)))
1834 return -EFAULT;
1835
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001836 hdev = hci_dev_get(ir.dev_id);
1837 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 return -ENODEV;
1839
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001840 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1841 err = -EBUSY;
1842 goto done;
1843 }
1844
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001845 if (hdev->dev_type != HCI_BREDR) {
1846 err = -EOPNOTSUPP;
1847 goto done;
1848 }
1849
Johan Hedberg56f87902013-10-02 13:43:13 +03001850 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1851 err = -EOPNOTSUPP;
1852 goto done;
1853 }
1854
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001855 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001856 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001857 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001858 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 do_inquiry = 1;
1860 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001861 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
Marcel Holtmann04837f62006-07-03 10:02:33 +02001863 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001864
1865 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001866 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1867 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001868 if (err < 0)
1869 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001870
1871 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1872 * cleared). If it is interrupted by a signal, return -EINTR.
1873 */
1874 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1875 TASK_INTERRUPTIBLE))
1876 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001879 /* for unlimited number of responses we will use buffer with
1880 * 255 entries
1881 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1883
1884 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1885 * copy it to the user space.
1886 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001887 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001888 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 err = -ENOMEM;
1890 goto done;
1891 }
1892
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001893 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001895 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
1897 BT_DBG("num_rsp %d", ir.num_rsp);
1898
1899 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1900 ptr += sizeof(ir);
1901 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001902 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001904 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 err = -EFAULT;
1906
1907 kfree(buf);
1908
1909done:
1910 hci_dev_put(hdev);
1911 return err;
1912}
1913
/* Bring the controller up: open the transport, run the driver setup
 * stage (if still in HCI_SETUP) and the HCI init sequence, then
 * announce HCI_DEV_UP.  On any init failure the queues and works are
 * flushed and the transport is closed again.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to power it up. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	/* Already powered on: nothing to do. */
	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (USB/UART/...) via the driver callback. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Optional driver-specific setup, only during initial setup. */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Skip the HCI init sequence for raw devices and for
		 * user channel access.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify the management interface, but not during setup,
		 * not for user channel, and only for BR/EDR controllers.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2020
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002021/* ---- HCI ioctl helpers ---- */
2022
/* HCIDEVUP ioctl helper: power on the controller identified by @dev.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2052
/* Tear the controller down: flush pending works, drop connections and
 * queues, optionally issue HCI Reset, and close the transport.
 * Safe to call on an already-down device (returns 0 immediately).
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A pending auto-power-off must not run into the teardown. */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Not up: only the command timer needs stopping. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a running discoverable timeout and clear the modes. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles NULL, so no guard is needed here. */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Notify mgmt unless the power-off was automatic; only BR/EDR
	 * controllers are visible to the management interface here.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drops the reference taken by hci_dev_hold() in do_open. */
	hci_dev_put(hdev);
	return 0;
}
2150
2151int hci_dev_close(__u16 dev)
2152{
2153 struct hci_dev *hdev;
2154 int err;
2155
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002156 hdev = hci_dev_get(dev);
2157 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002159
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002160 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2161 err = -EBUSY;
2162 goto done;
2163 }
2164
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002165 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2166 cancel_delayed_work(&hdev->power_off);
2167
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002169
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002170done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 hci_dev_put(hdev);
2172 return err;
2173}
2174
/* HCIDEVRESET ioctl helper: drop all queued traffic and connections on
 * a powered-up controller and issue HCI Reset (unless in raw mode).
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Reset only makes sense on a powered-up device. */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control accounting for all packet types. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2219
2220int hci_dev_reset_stat(__u16 dev)
2221{
2222 struct hci_dev *hdev;
2223 int ret = 0;
2224
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002225 hdev = hci_dev_get(dev);
2226 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 return -ENODEV;
2228
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002229 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2230 ret = -EBUSY;
2231 goto done;
2232 }
2233
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2235
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002236done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 return ret;
2239}
2240
/* Handle the HCISET* family of ioctls that tweak settings of an
 * enabled BR/EDR controller (auth, encryption, scan mode, link policy,
 * packet types and MTUs).  Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These settings only apply to enabled BR/EDR controllers. */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs (pkts, mtu) as two 16-bit halves. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2331
/* HCIGETDEVLIST ioctl helper: copy the (dev_id, flags) pair of up to
 * dev_num registered controllers to user space.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the request so the kernel allocation stays small. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device keeps it from auto-powering off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users get pairable by default. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries actually filled in. */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2378
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for
 * one controller and copy it to user space.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device keeps it from auto-powering off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) users get pairable by default. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report LE buffer sizes in the ACL slots. */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2427
2428/* ---- Interface to HCI drivers ---- */
2429
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002430static int hci_rfkill_set_block(void *data, bool blocked)
2431{
2432 struct hci_dev *hdev = data;
2433
2434 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2435
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002436 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2437 return -EBUSY;
2438
Johan Hedberg5e130362013-09-13 08:58:17 +03002439 if (blocked) {
2440 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002441 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2442 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002443 } else {
2444 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002445 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002446
2447 return 0;
2448}
2449
/* rfkill integration: only the set_block operation is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2453
/* Work item behind hdev->power_on: open the device and re-validate
 * the error conditions that are deliberately ignored while the
 * device is still in the setup phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* Still rfkill-blocked, or a BR/EDR controller with
		 * neither a public nor a static address configured:
		 * power back off and drop the auto-off state.
		 */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Device came up only for initialization: schedule the
		 * deferred power-off (canceled elsewhere when the
		 * HCI_AUTO_OFF flag gets cleared).
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-on ends the setup phase and makes the
	 * controller visible to the management interface.
	 */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2485
2486static void hci_power_off(struct work_struct *work)
2487{
Johan Hedberg32435532011-11-07 22:16:04 +02002488 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002489 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002490
2491 BT_DBG("%s", hdev->name);
2492
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002493 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002494}
2495
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002496static void hci_discov_off(struct work_struct *work)
2497{
2498 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002499
2500 hdev = container_of(work, struct hci_dev, discov_off.work);
2501
2502 BT_DBG("%s", hdev->name);
2503
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002504 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002505}
2506
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002507int hci_uuids_clear(struct hci_dev *hdev)
2508{
Johan Hedberg48210022013-01-27 00:31:28 +02002509 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002510
Johan Hedberg48210022013-01-27 00:31:28 +02002511 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2512 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002513 kfree(uuid);
2514 }
2515
2516 return 0;
2517}
2518
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002519int hci_link_keys_clear(struct hci_dev *hdev)
2520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
2531
2532 return 0;
2533}
2534
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002535int hci_smp_ltks_clear(struct hci_dev *hdev)
2536{
2537 struct smp_ltk *k, *tmp;
2538
2539 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2540 list_del(&k->list);
2541 kfree(k);
2542 }
2543
2544 return 0;
2545}
2546
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002547struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2548{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002549 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002550
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002551 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002552 if (bacmp(bdaddr, &k->bdaddr) == 0)
2553 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002554
2555 return NULL;
2556}
2557
/* Decide whether a newly delivered link key should be stored
 * persistently (survive beyond the current connection). The checks
 * are evaluated strictly in order: key-type based rules first, then
 * the bonding requirements negotiated on @conn. @conn may be NULL.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2593
Johan Hedberg98a0b842014-01-30 19:40:00 -08002594static bool ltk_type_master(u8 type)
2595{
2596 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2597 return true;
2598
2599 return false;
2600}
2601
2602struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2603 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002604{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002605 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002606
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002607 list_for_each_entry(k, &hdev->long_term_keys, list) {
2608 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002609 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002610 continue;
2611
Johan Hedberg98a0b842014-01-30 19:40:00 -08002612 if (ltk_type_master(k->type) != master)
2613 continue;
2614
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002615 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002616 }
2617
2618 return NULL;
2619}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002620
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002621struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002622 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002623{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002624 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002625
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002626 list_for_each_entry(k, &hdev->long_term_keys, list)
2627 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002628 bacmp(bdaddr, &k->bdaddr) == 0 &&
2629 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002630 return k;
2631
2632 return NULL;
2633}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002634
/* Store (or update in place) the link key for @bdaddr.
 *
 * @conn may be NULL (see the "Security mode 3 case" handled in
 * hci_persistent_key()). @new_key: non-zero when the controller
 * reported this as a newly created key; only then is mgmt notified
 * and the persistence decision applied.
 *
 * Returns 0 on success or -ENOMEM if a new list entry cannot be
 * allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key()
		 * and for the buggy-controller check below.
		 */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2687
/* Store (or update in place) an SMP long term key.
 *
 * The lookup key is (address, address type, master role), so a
 * master and a slave key for the same peer can coexist. When
 * @new_key is set, mgmt_new_ltk() is invoked for actual LTK types
 * (STKs are not reported).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);
	u8 persistent;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Random addresses whose top two bits are not 0b11 are not
	 * static random addresses (Bluetooth Core Spec addressing
	 * rules), so the peer cannot be re-identified later and the
	 * key is reported as non-persistent.
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		persistent = 0;
	else
		persistent = 1;

	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, persistent);

	return 0;
}
2728
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002729int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2730{
2731 struct link_key *key;
2732
2733 key = hci_find_link_key(hdev, bdaddr);
2734 if (!key)
2735 return -ENOENT;
2736
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002737 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002738
2739 list_del(&key->list);
2740 kfree(key);
2741
2742 return 0;
2743}
2744
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002745int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2746{
2747 struct smp_ltk *k, *tmp;
2748
2749 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2750 if (bacmp(bdaddr, &k->bdaddr))
2751 continue;
2752
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002753 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002754
2755 list_del(&k->list);
2756 kfree(k);
2757 }
2758
2759 return 0;
2760}
2761
Ville Tervo6bd32322011-02-16 16:32:41 +02002762/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002763static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002764{
2765 struct hci_dev *hdev = (void *) arg;
2766
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002767 if (hdev->sent_cmd) {
2768 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2769 u16 opcode = __le16_to_cpu(sent->opcode);
2770
2771 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2772 } else {
2773 BT_ERR("%s command tx timeout", hdev->name);
2774 }
2775
Ville Tervo6bd32322011-02-16 16:32:41 +02002776 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002777 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002778}
2779
Szymon Janc2763eda2011-03-22 13:12:22 +01002780struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002781 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002782{
2783 struct oob_data *data;
2784
2785 list_for_each_entry(data, &hdev->remote_oob_data, list)
2786 if (bacmp(bdaddr, &data->bdaddr) == 0)
2787 return data;
2788
2789 return NULL;
2790}
2791
2792int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2793{
2794 struct oob_data *data;
2795
2796 data = hci_find_remote_oob_data(hdev, bdaddr);
2797 if (!data)
2798 return -ENOENT;
2799
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002800 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002801
2802 list_del(&data->list);
2803 kfree(data);
2804
2805 return 0;
2806}
2807
2808int hci_remote_oob_data_clear(struct hci_dev *hdev)
2809{
2810 struct oob_data *data, *n;
2811
2812 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2813 list_del(&data->list);
2814 kfree(data);
2815 }
2816
2817 return 0;
2818}
2819
Marcel Holtmann07988722014-01-10 02:07:29 -08002820int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2821 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002822{
2823 struct oob_data *data;
2824
2825 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002826 if (!data) {
Marcel Holtmann07988722014-01-10 02:07:29 -08002827 data = kmalloc(sizeof(*data), GFP_ATOMIC);
Szymon Janc2763eda2011-03-22 13:12:22 +01002828 if (!data)
2829 return -ENOMEM;
2830
2831 bacpy(&data->bdaddr, bdaddr);
2832 list_add(&data->list, &hdev->remote_oob_data);
2833 }
2834
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002835 memcpy(data->hash192, hash, sizeof(data->hash192));
2836 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002837
Marcel Holtmann07988722014-01-10 02:07:29 -08002838 memset(data->hash256, 0, sizeof(data->hash256));
2839 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2840
2841 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2842
2843 return 0;
2844}
2845
2846int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2847 u8 *hash192, u8 *randomizer192,
2848 u8 *hash256, u8 *randomizer256)
2849{
2850 struct oob_data *data;
2851
2852 data = hci_find_remote_oob_data(hdev, bdaddr);
2853 if (!data) {
2854 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2855 if (!data)
2856 return -ENOMEM;
2857
2858 bacpy(&data->bdaddr, bdaddr);
2859 list_add(&data->list, &hdev->remote_oob_data);
2860 }
2861
2862 memcpy(data->hash192, hash192, sizeof(data->hash192));
2863 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2864
2865 memcpy(data->hash256, hash256, sizeof(data->hash256));
2866 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2867
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002868 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002869
2870 return 0;
2871}
2872
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002873struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2874 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002875{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002876 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002877
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002878 list_for_each_entry(b, &hdev->blacklist, list) {
2879 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002880 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002881 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002882
2883 return NULL;
2884}
2885
2886int hci_blacklist_clear(struct hci_dev *hdev)
2887{
2888 struct list_head *p, *n;
2889
2890 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002891 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002892
2893 list_del(p);
2894 kfree(b);
2895 }
2896
2897 return 0;
2898}
2899
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002900int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002901{
2902 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002903
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002904 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002905 return -EBADF;
2906
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002907 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002908 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002909
2910 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002911 if (!entry)
2912 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002913
2914 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002915 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002916
2917 list_add(&entry->list, &hdev->blacklist);
2918
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002919 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002920}
2921
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002922int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002923{
2924 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002925
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002926 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002927 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002928
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002929 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002930 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002931 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002932
2933 list_del(&entry->list);
2934 kfree(entry);
2935
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002936 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002937}
2938
Andre Guedes15819a72014-02-03 13:56:18 -03002939/* This function requires the caller holds hdev->lock */
2940struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2941 bdaddr_t *addr, u8 addr_type)
2942{
2943 struct hci_conn_params *params;
2944
2945 list_for_each_entry(params, &hdev->le_conn_params, list) {
2946 if (bacmp(&params->addr, addr) == 0 &&
2947 params->addr_type == addr_type) {
2948 return params;
2949 }
2950 }
2951
2952 return NULL;
2953}
2954
2955/* This function requires the caller holds hdev->lock */
2956void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
2957 u16 conn_min_interval, u16 conn_max_interval)
2958{
2959 struct hci_conn_params *params;
2960
2961 params = hci_conn_params_lookup(hdev, addr, addr_type);
2962 if (params) {
2963 params->conn_min_interval = conn_min_interval;
2964 params->conn_max_interval = conn_max_interval;
2965 return;
2966 }
2967
2968 params = kzalloc(sizeof(*params), GFP_KERNEL);
2969 if (!params) {
2970 BT_ERR("Out of memory");
2971 return;
2972 }
2973
2974 bacpy(&params->addr, addr);
2975 params->addr_type = addr_type;
2976 params->conn_min_interval = conn_min_interval;
2977 params->conn_max_interval = conn_max_interval;
2978
2979 list_add(&params->list, &hdev->le_conn_params);
2980
2981 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
2982 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
2983 conn_max_interval);
2984}
2985
2986/* This function requires the caller holds hdev->lock */
2987void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2988{
2989 struct hci_conn_params *params;
2990
2991 params = hci_conn_params_lookup(hdev, addr, addr_type);
2992 if (!params)
2993 return;
2994
2995 list_del(&params->list);
2996 kfree(params);
2997
2998 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2999}
3000
3001/* This function requires the caller holds hdev->lock */
3002void hci_conn_params_clear(struct hci_dev *hdev)
3003{
3004 struct hci_conn_params *params, *tmp;
3005
3006 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3007 list_del(&params->list);
3008 kfree(params);
3009 }
3010
3011 BT_DBG("All LE connection parameters were removed");
3012}
3013
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003014static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003015{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003016 if (status) {
3017 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003018
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003019 hci_dev_lock(hdev);
3020 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3021 hci_dev_unlock(hdev);
3022 return;
3023 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003024}
3025
/* Completion handler for the LE scan disable request sent by
 * le_scan_disable_work(). Depending on the discovery type, either
 * mark discovery as stopped or continue with the BR/EDR inquiry
 * phase of an interleaved discovery.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery ends once scanning is disabled */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase done: start the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Stale inquiry cache entries would be re-reported */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3068
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003069static void le_scan_disable_work(struct work_struct *work)
3070{
3071 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003072 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003073 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003074 struct hci_request req;
3075 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003076
3077 BT_DBG("%s", hdev->name);
3078
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003079 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003080
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003081 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003082 cp.enable = LE_SCAN_DISABLE;
3083 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003084
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003085 err = hci_req_run(&req, le_scan_disable_work_complete);
3086 if (err)
3087 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003088}
3089
/* Allocate and initialize a new HCI device structure.
 *
 * Sets protocol defaults (packet types, sniff intervals, LE scan and
 * connection parameters), initializes all key/blacklist/OOB lists,
 * work items, packet queues and the command timer. Returns NULL on
 * allocation failure; release with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline capability defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff mode interval defaults */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE scan and connection parameter defaults (controller units;
	 * exact unit semantics defined by the HCI specification).
	 */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3150
/* Free an HCI device allocated with hci_alloc_dev(). The hci_dev is
 * embedded in a struct device, so the memory is released through the
 * device release callback once the last reference is dropped.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3158
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159/* Register HCI device */
3160int hci_register_dev(struct hci_dev *hdev)
3161{
David Herrmannb1b813d2012-04-22 14:39:58 +02003162 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163
David Herrmann010666a2012-01-07 15:47:07 +01003164 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 return -EINVAL;
3166
Mat Martineau08add512011-11-02 16:18:36 -07003167 /* Do not allow HCI_AMP devices to register at index 0,
3168 * so the index can be used as the AMP controller ID.
3169 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003170 switch (hdev->dev_type) {
3171 case HCI_BREDR:
3172 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3173 break;
3174 case HCI_AMP:
3175 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3176 break;
3177 default:
3178 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003180
Sasha Levin3df92b32012-05-27 22:36:56 +02003181 if (id < 0)
3182 return id;
3183
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 sprintf(hdev->name, "hci%d", id);
3185 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003186
3187 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3188
Kees Cookd8537542013-07-03 15:04:57 -07003189 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3190 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003191 if (!hdev->workqueue) {
3192 error = -ENOMEM;
3193 goto err;
3194 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003195
Kees Cookd8537542013-07-03 15:04:57 -07003196 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3197 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003198 if (!hdev->req_workqueue) {
3199 destroy_workqueue(hdev->workqueue);
3200 error = -ENOMEM;
3201 goto err;
3202 }
3203
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003204 if (!IS_ERR_OR_NULL(bt_debugfs))
3205 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3206
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003207 dev_set_name(&hdev->dev, "%s", hdev->name);
3208
Johan Hedberg99780a72014-02-18 10:40:07 +02003209 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3210 CRYPTO_ALG_ASYNC);
3211 if (IS_ERR(hdev->tfm_aes)) {
3212 BT_ERR("Unable to create crypto context");
3213 error = PTR_ERR(hdev->tfm_aes);
3214 hdev->tfm_aes = NULL;
3215 goto err_wqueue;
3216 }
3217
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003218 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003219 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003220 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003222 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003223 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3224 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003225 if (hdev->rfkill) {
3226 if (rfkill_register(hdev->rfkill) < 0) {
3227 rfkill_destroy(hdev->rfkill);
3228 hdev->rfkill = NULL;
3229 }
3230 }
3231
Johan Hedberg5e130362013-09-13 08:58:17 +03003232 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3233 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3234
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003235 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003236 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003237
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003238 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003239 /* Assume BR/EDR support until proven otherwise (such as
3240 * through reading supported features during init.
3241 */
3242 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3243 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003244
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003245 write_lock(&hci_dev_list_lock);
3246 list_add(&hdev->list, &hci_dev_list);
3247 write_unlock(&hci_dev_list_lock);
3248
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003250 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251
Johan Hedberg19202572013-01-14 22:33:51 +02003252 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003253
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003255
Johan Hedberg99780a72014-02-18 10:40:07 +02003256err_tfm:
3257 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003258err_wqueue:
3259 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003260 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003261err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003262 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003263
David Herrmann33ca9542011-10-08 14:58:49 +02003264 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265}
3266EXPORT_SYMBOL(hci_register_dev);
3267
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up, in reverse order:
 * the device is taken off the global list first so no new users can
 * find it, then closed, then its user-visible interfaces (mgmt, rfkill,
 * sysfs, debugfs) are removed before the stored security material is
 * cleared and the controller index is released.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevents re-entry into setup paths while tearing down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be long gone (hci_dev_put below)
	 * by the time the ida slot is released.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce the removal to mgmt if the device ever finished
	 * its setup phase.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear all persistent security/identity material under the
	 * device lock before the final reference is dropped.
	 */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3332
/* Suspend HCI device.
 *
 * Only broadcasts the suspend notification to registered HCI notifier
 * consumers; no device state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3340
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts the resume notification
 * to registered HCI notifier consumers.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3348
Marcel Holtmann76bca882009-11-18 00:40:39 +01003349/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003350int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003351{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003352 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003353 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003354 kfree_skb(skb);
3355 return -ENXIO;
3356 }
3357
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003358 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003359 bt_cb(skb)->incoming = 1;
3360
3361 /* Time stamp */
3362 __net_timestamp(skb);
3363
Marcel Holtmann76bca882009-11-18 00:40:39 +01003364 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003365 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003366
Marcel Holtmann76bca882009-11-18 00:40:39 +01003367 return 0;
3368}
3369EXPORT_SYMBOL(hci_recv_frame);
3370
/* Reassemble a (possibly fragmented) HCI packet.
 *
 * Appends up to @count bytes from @data to the partially built skb kept
 * in hdev->reassembly[@index], allocating a fresh skb sized for the
 * worst case of @type when no packet is pending.  Once enough bytes for
 * the packet header have arrived, the expected payload length is read
 * from the header; when the complete packet has been gathered it is
 * passed to hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for an
 * unsupported packet type or slot index, or -ENOMEM on allocation
 * failure / oversized payload (the pending skb is discarded then).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets can be reassembled */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate for the maximum frame
		 * size of this type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has been collected, pull the
		 * payload length out of it and make sure it fits.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3478
/* Feed driver-provided fragments of a single packet type into the
 * reassembly machinery.
 *
 * The per-type reassembly slot (type - 1) is used, so fragments of
 * different packet types may be interleaved.  Returns the number of
 * bytes left unconsumed after a completed packet (>= 0) or a negative
 * error from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	/* Keep feeding the leftover bytes back in; hci_reassembly()
	 * returns how much of the input it did not consume.
	 */
	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
3498
/* Dedicated reassembly slot for byte-stream transports */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (e.g. from a UART transport) into reassembly.
 *
 * In a stream the first byte of every packet is the packet type
 * indicator, so when no packet is pending the type is read from the
 * stream itself; otherwise it is taken from the partially built skb.
 * Returns unconsumed byte count (>= 0) or a negative error from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* The type byte itself is consumed here */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3533
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534/* ---- Interface to upper protocols ---- */
3535
/* Register an upper-protocol callback structure on the global
 * hci_cb_list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3547
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3559
/* Hand one outgoing frame to the transport driver.
 *
 * The frame is time stamped and copied to the monitor channel (and to
 * raw sockets when a promiscuous listener exists) before being passed
 * to the driver's send callback.  A driver send failure is only
 * logged; the skb is owned by the driver from that point on.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3581
Johan Hedberg3119ae92013-03-05 20:37:44 +02003582void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3583{
3584 skb_queue_head_init(&req->cmd_q);
3585 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003586 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003587}
3588
/* Submit a built HCI request for execution.
 *
 * Attaches @complete to the last queued command, splices the request's
 * command queue onto the device command queue under the queue lock and
 * kicks the command work.  Returns 0 on success, the recorded build
 * error if one occurred (commands are discarded then), or -ENODATA for
 * an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the final command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3620
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003621static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003622 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623{
3624 int len = HCI_COMMAND_HDR_SIZE + plen;
3625 struct hci_command_hdr *hdr;
3626 struct sk_buff *skb;
3627
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003629 if (!skb)
3630 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631
3632 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003633 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 hdr->plen = plen;
3635
3636 if (plen)
3637 memcpy(skb_put(skb, plen), param, plen);
3638
3639 BT_DBG("skb len %d", skb->len);
3640
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003641 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003642
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003643 return skb;
3644}
3645
/* Send HCI command.
 *
 * Builds a stand-alone (single-command request) HCI command and queues
 * it on the device command queue for the command work to transmit.
 * Returns 0 on success or -ENOMEM when the skb cannot be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670
/* Queue a command to an asynchronous HCI request.
 *
 * Appends one command to @req's private queue (nothing is transmitted
 * until hci_req_run()).  @event, when non-zero, names the HCI event
 * expected to complete this command instead of Command Complete/Status.
 * Allocation failures are recorded in req->err for hci_req_run() to
 * report, so this function returns nothing.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3701
/* Queue a command to an asynchronous HCI request, completed by the
 * normal Command Complete/Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3707
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003709void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710{
3711 struct hci_command_hdr *hdr;
3712
3713 if (!hdev->sent_cmd)
3714 return NULL;
3715
3716 hdr = (void *) hdev->sent_cmd->data;
3717
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003718 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 return NULL;
3720
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003721 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
3723 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3724}
3725
3726/* Send ACL data */
3727static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3728{
3729 struct hci_acl_hdr *hdr;
3730 int len = skb->len;
3731
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003732 skb_push(skb, HCI_ACL_HDR_SIZE);
3733 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003734 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003735 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3736 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737}
3738
/* Add ACL headers to an outgoing skb (and its fragment list, if any)
 * and append everything to @queue.
 *
 * For BR/EDR the connection handle is used; for AMP the channel handle.
 * A fragmented skb has its frag_list detached and each fragment is
 * queued individually, all under the queue lock so the fragments stay
 * contiguous; fragments after the first carry ACL_CONT instead of
 * ACL_START.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict len to the head portion; fragments are handled below */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments are never packet starts */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3796
/* Queue ACL data on the channel's data queue and kick the TX work,
 * which performs the actual scheduling and transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807
/* Send SCO data.
 *
 * Prepends the SCO header (connection handle + length), queues the skb
 * on the connection's data queue and kicks the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828
3829/* ---- HCI TX task (outgoing data) ---- */
3830
/* HCI Connection scheduler.
 *
 * Pick the connection of the given link @type with pending data and
 * the fewest in-flight packets (fairness by least-sent), and compute
 * its transmit quota in *@quote from the available controller buffer
 * credits divided by the number of eligible connections.  Returns the
 * chosen connection or NULL (with *quote == 0) when none qualifies.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer-credit pool matching the link type;
		 * LE shares the ACL pool when no dedicated LE buffers
		 * were reported by the controller.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Always grant at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3891
/* Link TX timeout handler: the controller stopped returning buffer
 * credits for links of @type, so disconnect every connection of that
 * type that still has unacknowledged packets.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3912
/* Channel-level scheduler (priority-aware variant of hci_low_sent()).
 *
 * Scan all connections of link @type and their channels, considering
 * only channels whose head-of-queue skb carries the highest priority
 * seen so far; among those, pick the one on the connection with the
 * fewest in-flight packets.  *@quote receives the transmit quota from
 * the matching buffer-credit pool divided by the number of competing
 * channels.  Returns the chosen channel or NULL.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness race */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer-credit pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Always grant at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3994
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003995static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3996{
3997 struct hci_conn_hash *h = &hdev->conn_hash;
3998 struct hci_conn *conn;
3999 int num = 0;
4000
4001 BT_DBG("%s", hdev->name);
4002
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004003 rcu_read_lock();
4004
4005 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004006 struct hci_chan *chan;
4007
4008 if (conn->type != type)
4009 continue;
4010
4011 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4012 continue;
4013
4014 num++;
4015
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004016 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004017 struct sk_buff *skb;
4018
4019 if (chan->sent) {
4020 chan->sent = 0;
4021 continue;
4022 }
4023
4024 if (skb_queue_empty(&chan->data_q))
4025 continue;
4026
4027 skb = skb_peek(&chan->data_q);
4028 if (skb->priority >= HCI_PRIO_MAX - 1)
4029 continue;
4030
4031 skb->priority = HCI_PRIO_MAX - 1;
4032
4033 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004034 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004035 }
4036
4037 if (hci_conn_num(hdev, type) == num)
4038 break;
4039 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004040
4041 rcu_read_unlock();
4042
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004043}
4044
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004045static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4046{
4047 /* Calculate count of blocks used by this packet */
4048 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4049}
4050
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004051static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 if (!test_bit(HCI_RAW, &hdev->flags)) {
4054 /* ACL tx timeout must be longer than maximum
4055 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004056 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004057 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004058 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004060}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004062static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004063{
4064 unsigned int cnt = hdev->acl_cnt;
4065 struct hci_chan *chan;
4066 struct sk_buff *skb;
4067 int quote;
4068
4069 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004070
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004071 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004072 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004073 u32 priority = (skb_peek(&chan->data_q))->priority;
4074 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004075 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004076 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004077
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004078 /* Stop if priority has changed */
4079 if (skb->priority < priority)
4080 break;
4081
4082 skb = skb_dequeue(&chan->data_q);
4083
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004084 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004085 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004086
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004087 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 hdev->acl_last_tx = jiffies;
4089
4090 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004091 chan->sent++;
4092 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093 }
4094 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004095
4096 if (cnt != hdev->acl_cnt)
4097 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098}
4099
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004100static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004101{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004102 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004103 struct hci_chan *chan;
4104 struct sk_buff *skb;
4105 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004106 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004107
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004108 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004109
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004110 BT_DBG("%s", hdev->name);
4111
4112 if (hdev->dev_type == HCI_AMP)
4113 type = AMP_LINK;
4114 else
4115 type = ACL_LINK;
4116
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004117 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004118 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004119 u32 priority = (skb_peek(&chan->data_q))->priority;
4120 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4121 int blocks;
4122
4123 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004124 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004125
4126 /* Stop if priority has changed */
4127 if (skb->priority < priority)
4128 break;
4129
4130 skb = skb_dequeue(&chan->data_q);
4131
4132 blocks = __get_blocks(hdev, skb);
4133 if (blocks > hdev->block_cnt)
4134 return;
4135
4136 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004137 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004138
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004139 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004140 hdev->acl_last_tx = jiffies;
4141
4142 hdev->block_cnt -= blocks;
4143 quote -= blocks;
4144
4145 chan->sent += blocks;
4146 chan->conn->sent += blocks;
4147 }
4148 }
4149
4150 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004151 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004152}
4153
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004154static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004155{
4156 BT_DBG("%s", hdev->name);
4157
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004158 /* No ACL link over BR/EDR controller */
4159 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4160 return;
4161
4162 /* No AMP link over AMP controller */
4163 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004164 return;
4165
4166 switch (hdev->flow_ctl_mode) {
4167 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4168 hci_sched_acl_pkt(hdev);
4169 break;
4170
4171 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4172 hci_sched_acl_blk(hdev);
4173 break;
4174 }
4175}
4176
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004178static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179{
4180 struct hci_conn *conn;
4181 struct sk_buff *skb;
4182 int quote;
4183
4184 BT_DBG("%s", hdev->name);
4185
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004186 if (!hci_conn_num(hdev, SCO_LINK))
4187 return;
4188
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4190 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4191 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004192 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193
4194 conn->sent++;
4195 if (conn->sent == ~0)
4196 conn->sent = 0;
4197 }
4198 }
4199}
4200
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004201static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004202{
4203 struct hci_conn *conn;
4204 struct sk_buff *skb;
4205 int quote;
4206
4207 BT_DBG("%s", hdev->name);
4208
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004209 if (!hci_conn_num(hdev, ESCO_LINK))
4210 return;
4211
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004212 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4213 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004214 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4215 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004216 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004217
4218 conn->sent++;
4219 if (conn->sent == ~0)
4220 conn->sent = 0;
4221 }
4222 }
4223}
4224
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004225static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004226{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004227 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004228 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004229 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004230
4231 BT_DBG("%s", hdev->name);
4232
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004233 if (!hci_conn_num(hdev, LE_LINK))
4234 return;
4235
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004236 if (!test_bit(HCI_RAW, &hdev->flags)) {
4237 /* LE tx timeout must be longer than maximum
4238 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004239 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004240 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004241 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004242 }
4243
4244 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004245 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004246 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004247 u32 priority = (skb_peek(&chan->data_q))->priority;
4248 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004249 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004250 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004251
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004252 /* Stop if priority has changed */
4253 if (skb->priority < priority)
4254 break;
4255
4256 skb = skb_dequeue(&chan->data_q);
4257
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004258 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004259 hdev->le_last_tx = jiffies;
4260
4261 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004262 chan->sent++;
4263 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004264 }
4265 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004266
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004267 if (hdev->le_pkts)
4268 hdev->le_cnt = cnt;
4269 else
4270 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004271
4272 if (cnt != tmp)
4273 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004274}
4275
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004276static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004278 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 struct sk_buff *skb;
4280
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004281 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004282 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283
Marcel Holtmann52de5992013-09-03 18:08:38 -07004284 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4285 /* Schedule queues and send stuff to HCI driver */
4286 hci_sched_acl(hdev);
4287 hci_sched_sco(hdev);
4288 hci_sched_esco(hdev);
4289 hci_sched_le(hdev);
4290 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004291
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292 /* Send next queued raw (unknown type) packet */
4293 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004294 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295}
4296
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004297/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298
4299/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004300static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301{
4302 struct hci_acl_hdr *hdr = (void *) skb->data;
4303 struct hci_conn *conn;
4304 __u16 handle, flags;
4305
4306 skb_pull(skb, HCI_ACL_HDR_SIZE);
4307
4308 handle = __le16_to_cpu(hdr->handle);
4309 flags = hci_flags(handle);
4310 handle = hci_handle(handle);
4311
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004312 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004313 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314
4315 hdev->stat.acl_rx++;
4316
4317 hci_dev_lock(hdev);
4318 conn = hci_conn_hash_lookup_handle(hdev, handle);
4319 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004320
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004322 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004323
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004325 l2cap_recv_acldata(conn, skb, flags);
4326 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004328 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004329 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 }
4331
4332 kfree_skb(skb);
4333}
4334
4335/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004336static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337{
4338 struct hci_sco_hdr *hdr = (void *) skb->data;
4339 struct hci_conn *conn;
4340 __u16 handle;
4341
4342 skb_pull(skb, HCI_SCO_HDR_SIZE);
4343
4344 handle = __le16_to_cpu(hdr->handle);
4345
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004346 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347
4348 hdev->stat.sco_rx++;
4349
4350 hci_dev_lock(hdev);
4351 conn = hci_conn_hash_lookup_handle(hdev, handle);
4352 hci_dev_unlock(hdev);
4353
4354 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004356 sco_recv_scodata(conn, skb);
4357 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004359 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004360 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 }
4362
4363 kfree_skb(skb);
4364}
4365
Johan Hedberg9238f362013-03-05 20:37:48 +02004366static bool hci_req_is_complete(struct hci_dev *hdev)
4367{
4368 struct sk_buff *skb;
4369
4370 skb = skb_peek(&hdev->cmd_q);
4371 if (!skb)
4372 return true;
4373
4374 return bt_cb(skb)->req.start;
4375}
4376
Johan Hedberg42c6b122013-03-05 20:37:49 +02004377static void hci_resend_last(struct hci_dev *hdev)
4378{
4379 struct hci_command_hdr *sent;
4380 struct sk_buff *skb;
4381 u16 opcode;
4382
4383 if (!hdev->sent_cmd)
4384 return;
4385
4386 sent = (void *) hdev->sent_cmd->data;
4387 opcode = __le16_to_cpu(sent->opcode);
4388 if (opcode == HCI_OP_RESET)
4389 return;
4390
4391 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4392 if (!skb)
4393 return;
4394
4395 skb_queue_head(&hdev->cmd_q, skb);
4396 queue_work(hdev->workqueue, &hdev->cmd_work);
4397}
4398
Johan Hedberg9238f362013-03-05 20:37:48 +02004399void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4400{
4401 hci_req_complete_t req_complete = NULL;
4402 struct sk_buff *skb;
4403 unsigned long flags;
4404
4405 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4406
Johan Hedberg42c6b122013-03-05 20:37:49 +02004407 /* If the completed command doesn't match the last one that was
4408 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004409 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004410 if (!hci_sent_cmd_data(hdev, opcode)) {
4411 /* Some CSR based controllers generate a spontaneous
4412 * reset complete event during init and any pending
4413 * command will never be completed. In such a case we
4414 * need to resend whatever was the last sent
4415 * command.
4416 */
4417 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4418 hci_resend_last(hdev);
4419
Johan Hedberg9238f362013-03-05 20:37:48 +02004420 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004421 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004422
4423 /* If the command succeeded and there's still more commands in
4424 * this request the request is not yet complete.
4425 */
4426 if (!status && !hci_req_is_complete(hdev))
4427 return;
4428
4429 /* If this was the last command in a request the complete
4430 * callback would be found in hdev->sent_cmd instead of the
4431 * command queue (hdev->cmd_q).
4432 */
4433 if (hdev->sent_cmd) {
4434 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004435
4436 if (req_complete) {
4437 /* We must set the complete callback to NULL to
4438 * avoid calling the callback more than once if
4439 * this function gets called again.
4440 */
4441 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4442
Johan Hedberg9238f362013-03-05 20:37:48 +02004443 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004444 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004445 }
4446
4447 /* Remove all pending commands belonging to this request */
4448 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4449 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4450 if (bt_cb(skb)->req.start) {
4451 __skb_queue_head(&hdev->cmd_q, skb);
4452 break;
4453 }
4454
4455 req_complete = bt_cb(skb)->req.complete;
4456 kfree_skb(skb);
4457 }
4458 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4459
4460call_complete:
4461 if (req_complete)
4462 req_complete(hdev, status);
4463}
4464
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004465static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004467 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 struct sk_buff *skb;
4469
4470 BT_DBG("%s", hdev->name);
4471
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004473 /* Send copy to monitor */
4474 hci_send_to_monitor(hdev, skb);
4475
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 if (atomic_read(&hdev->promisc)) {
4477 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004478 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 }
4480
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004481 if (test_bit(HCI_RAW, &hdev->flags) ||
4482 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 kfree_skb(skb);
4484 continue;
4485 }
4486
4487 if (test_bit(HCI_INIT, &hdev->flags)) {
4488 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004489 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490 case HCI_ACLDATA_PKT:
4491 case HCI_SCODATA_PKT:
4492 kfree_skb(skb);
4493 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004494 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 }
4496
4497 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004498 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004500 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 hci_event_packet(hdev, skb);
4502 break;
4503
4504 case HCI_ACLDATA_PKT:
4505 BT_DBG("%s ACL data packet", hdev->name);
4506 hci_acldata_packet(hdev, skb);
4507 break;
4508
4509 case HCI_SCODATA_PKT:
4510 BT_DBG("%s SCO data packet", hdev->name);
4511 hci_scodata_packet(hdev, skb);
4512 break;
4513
4514 default:
4515 kfree_skb(skb);
4516 break;
4517 }
4518 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519}
4520
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004521static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004523 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524 struct sk_buff *skb;
4525
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004526 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4527 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004530 if (atomic_read(&hdev->cmd_cnt)) {
4531 skb = skb_dequeue(&hdev->cmd_q);
4532 if (!skb)
4533 return;
4534
Wei Yongjun7585b972009-02-25 18:29:52 +08004535 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004537 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004538 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004540 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004541 if (test_bit(HCI_RESET, &hdev->flags))
4542 del_timer(&hdev->cmd_timer);
4543 else
4544 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004545 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 } else {
4547 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004548 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 }
4550 }
4551}