/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

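/* Note: the boolean entries below follow the usual debugfs convention:
 * reads return "Y\n" or "N\n" and writes accept whatever strtobool()
 * understands. Assuming debugfs is mounted at /sys/kernel/debug (an
 * assumption about the system, not something this file controls), DUT
 * mode on hci0 would be toggled with:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */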
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

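/* Unlike dut_mode, HCI_FORCE_SC may only be flipped while the controller
 * is down (the write handler returns -EBUSY otherwise), because Secure
 * Connections support is negotiated during the hci_init4_req() stage of
 * controller setup.
 */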
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

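/* The idle timeout is kept in milliseconds; 0 disables it and any other
 * value must fall between 500 msec and one hour, as enforced by the
 * setter below.
 */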
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

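/* Sniff intervals are expressed in baseband slots of 0.625 ms. The HCI
 * Sniff Mode parameters must be an even number of slots, which is why
 * the setters below reject odd values and keep min <= max.
 */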
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

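/* The value written here follows the HCI Own_Address_Type encoding:
 * 0 selects the public address (ADDR_LE_DEV_PUBLIC) and 1 the random
 * address (ADDR_LE_DEV_RANDOM); see the default picked in
 * hci_init3_req() below.
 */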
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

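/* LE connection intervals are in units of 1.25 ms: the valid range
 * 0x0006-0x0c80 enforced below corresponds to 7.5 ms - 4.0 s per the
 * Core specification.
 */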
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

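/* Synchronous requests work as follows: the caller queues one or more
 * commands via hci_req_run() and sleeps on hdev->req_wait_q; the event
 * processing path stores the outcome in hdev->req_result and wakes the
 * sleeper through one of the two helpers below.
 */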
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

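/* Retrieve the last received event, stashed in hdev->recv_evt by the
 * event processing path, and check that it is what the synchronous
 * command sender was waiting for: either a specific event or the
 * Command Complete for the given opcode.
 */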
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

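/* Convenience wrapper for the common case of sending a single command
 * and waiting for its Command Complete event. A typical call, as used
 * by dut_mode_write() above, looks like:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *
 * The returned skb carries the command's return parameters, starting
 * with the status byte, and must be freed with kfree_skb().
 */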
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

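/* Pick the mode for HCI_OP_WRITE_INQUIRY_MODE: 0x02 = inquiry result
 * with RSSI or extended inquiry result, 0x01 = inquiry result with
 * RSSI, 0x00 = standard inquiry result. The manufacturer/revision
 * checks below special-case controllers that handle RSSI inquiry
 * results without advertising the corresponding feature bit.
 */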
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

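/* Controller bring-up runs as up to four synchronous request stages:
 * stage 1 resets the controller and reads basic information, while
 * stages 2-4 apply only to BR/EDR/LE controllers (AMP controllers
 * stop after stage 1) and progressively configure event masks, link
 * policy and optional features based on what earlier stages reported.
 */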
Johan Hedberg2177bab2013-03-05 20:37:43 +02001426static int __hci_init(struct hci_dev *hdev)
1427{
1428 int err;
1429
1430 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1431 if (err < 0)
1432 return err;
1433
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001434 /* The Device Under Test (DUT) mode is special and available for
1435 * all controller types. So just create it early on.
1436 */
1437 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1438 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1439 &dut_mode_fops);
1440 }
1441
Johan Hedberg2177bab2013-03-05 20:37:43 +02001442 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1443 * BR/EDR/LE type controllers. AMP controllers only need the
1444 * first stage init.
1445 */
1446 if (hdev->dev_type != HCI_BREDR)
1447 return 0;
1448
1449 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1450 if (err < 0)
1451 return err;
1452
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001453 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1454 if (err < 0)
1455 return err;
1456
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001457 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1458 if (err < 0)
1459 return err;
1460
1461 /* Only create debugfs entries during the initial setup
1462 * phase and not every time the controller gets powered on.
1463 */
1464 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1465 return 0;
1466
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001467 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1468 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001469 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1470 &hdev->manufacturer);
1471 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1472 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001473 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1474 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001475 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1476
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001477 if (lmp_bredr_capable(hdev)) {
1478 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1479 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001480 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1481 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001482 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1483 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001484 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1485 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001486 }
1487
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001488 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001489 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1490 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001491 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1492 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001493 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1494 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001495 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1496 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001497 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001498
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001499 if (lmp_sniff_capable(hdev)) {
1500 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1501 hdev, &idle_timeout_fops);
1502 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1503 hdev, &sniff_min_interval_fops);
1504 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1505 hdev, &sniff_max_interval_fops);
1506 }
1507
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001508 if (lmp_le_capable(hdev)) {
1509 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1510 &hdev->le_white_list_size);
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001511 debugfs_create_file("static_address", 0444, hdev->debugfs,
1512 hdev, &static_address_fops);
Marcel Holtmann92202182013-10-18 16:38:10 -07001513 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1514 hdev, &own_address_type_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001515 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1516 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001517 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1518 hdev, &conn_min_interval_fops);
1519 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1520 hdev, &conn_max_interval_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001521 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1522 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001523 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001524
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001525 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001526}
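/* A minimal sketch (not part of the original file) showing how the
 * debugfs attributes registered above are typically backed. The names
 * example_value_get and example_value_fops are made up for
 * illustration; the real *_fops definitions follow the same
 * DEFINE_SIMPLE_ATTRIBUTE pattern used elsewhere in this file.
 */
static int example_value_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	/* Expose a single controller field as a read-only value */
	*val = hdev->hci_ver;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_value_fops, example_value_get, NULL, "%llu\n");

/* It would then be registered from the setup path above as:
 *
 *	debugfs_create_file("example_value", 0444, hdev->debugfs, hdev,
 *			    &example_value_fops);
 */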
1527
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
1530 __u8 scan = opt;
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
1537
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539{
1540 __u8 auth = opt;
1541
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
1544 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
1550 __u8 encrypt = opt;
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001554 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
1557
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001559{
1560 __le16 policy = cpu_to_le16(opt);
1561
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001563
1564 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001566}
1567
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001568/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 * Device is held on return. */
1570struct hci_dev *hci_dev_get(int index)
1571{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001572 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
1574 BT_DBG("%d", index);
1575
1576 if (index < 0)
1577 return NULL;
1578
1579 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001580 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 if (d->id == index) {
1582 hdev = hci_dev_hold(d);
1583 break;
1584 }
1585 }
1586 read_unlock(&hci_dev_list_lock);
1587 return hdev;
1588}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
1590/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001591
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
Andre Guedes6fbe1952012-02-03 17:47:58 -03001596 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001597 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001598 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001599 return true;
1600
Andre Guedes6fbe1952012-02-03 17:47:58 -03001601 default:
1602 return false;
1603 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001604}
1605
Johan Hedbergff9ef572012-01-04 14:23:45 +02001606void hci_discovery_set_state(struct hci_dev *hdev, int state)
1607{
1608 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1609
1610 if (hdev->discovery.state == state)
1611 return;
1612
1613 switch (state) {
1614 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001615 if (hdev->discovery.state != DISCOVERY_STARTING)
1616 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001617 break;
1618 case DISCOVERY_STARTING:
1619 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001620 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001621 mgmt_discovering(hdev, 1);
1622 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001623 case DISCOVERY_RESOLVING:
1624 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001625 case DISCOVERY_STOPPING:
1626 break;
1627 }
1628
1629 hdev->discovery.state = state;
1630}
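/* Discovery state machine summary (derived from the switch above):
 *
 *   STOPPED -> STARTING -> FINDING -> RESOLVING -> STOPPING -> STOPPED
 *
 * mgmt_discovering(hdev, 1) is emitted on entering FINDING, and
 * mgmt_discovering(hdev, 0) on entering STOPPED, unless the previous
 * state was STARTING (i.e. discovery never actually began).
 */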
1631
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001632void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633{
Johan Hedberg30883512012-01-04 14:16:21 +02001634 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001635 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Johan Hedberg561aafb2012-01-04 13:31:59 +02001637 list_for_each_entry_safe(p, n, &cache->all, all) {
1638 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001639 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001641
1642 INIT_LIST_HEAD(&cache->unknown);
1643 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644}
1645
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001646struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
Johan Hedberg30883512012-01-04 14:16:21 +02001649 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 struct inquiry_entry *e;
1651
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001652 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Johan Hedberg561aafb2012-01-04 13:31:59 +02001654 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001656 return e;
1657 }
1658
1659 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
1661
Johan Hedberg561aafb2012-01-04 13:31:59 +02001662struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001663 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001664{
Johan Hedberg30883512012-01-04 14:16:21 +02001665 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001666 struct inquiry_entry *e;
1667
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001668 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001669
1670 list_for_each_entry(e, &cache->unknown, list) {
1671 if (!bacmp(&e->data.bdaddr, bdaddr))
1672 return e;
1673 }
1674
1675 return NULL;
1676}
1677
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001678struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001679 bdaddr_t *bdaddr,
1680 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001681{
1682 struct discovery_state *cache = &hdev->discovery;
1683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001686
1687 list_for_each_entry(e, &cache->resolve, list) {
1688 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689 return e;
1690 if (!bacmp(&e->data.bdaddr, bdaddr))
1691 return e;
1692 }
1693
1694 return NULL;
1695}
1696
Johan Hedberga3d4e202012-01-09 00:53:02 +02001697void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001698 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001699{
1700 struct discovery_state *cache = &hdev->discovery;
1701 struct list_head *pos = &cache->resolve;
1702 struct inquiry_entry *p;
1703
1704 list_del(&ie->list);
1705
1706 list_for_each_entry(p, &cache->resolve, list) {
1707 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001708 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001709 break;
1710 pos = &p->list;
1711 }
1712
1713 list_add(&ie->list, pos);
1714}
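/* Worked example for the re-insertion above (not from the original
 * file): with resolve-list |RSSI| values [35, 42, 60], strongest
 * signal first, an updated entry with |RSSI| 50 lands between 42 and
 * 60, so name resolution is attempted for the strongest signals
 * first. Entries already in NAME_PENDING state never trigger the
 * comparison and thus keep their position.
 */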
1715
Johan Hedberg31754052012-01-04 13:39:52 +02001716bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001717 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718{
Johan Hedberg30883512012-01-04 14:16:21 +02001719 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001720 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001722 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Szymon Janc2b2fec42012-11-20 11:38:54 +01001724 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1725
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001726 if (ssp)
1727 *ssp = data->ssp_mode;
1728
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001729 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001730 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001731 if (ie->data.ssp_mode && ssp)
1732 *ssp = true;
1733
Johan Hedberga3d4e202012-01-09 00:53:02 +02001734 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001735 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001736 ie->data.rssi = data->rssi;
1737 hci_inquiry_cache_update_resolve(hdev, ie);
1738 }
1739
Johan Hedberg561aafb2012-01-04 13:31:59 +02001740 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001741 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001742
Johan Hedberg561aafb2012-01-04 13:31:59 +02001743 /* Entry not in the cache. Add new one. */
1744 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1745 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001746 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001747
1748 list_add(&ie->all, &cache->all);
1749
1750 if (name_known) {
1751 ie->name_state = NAME_KNOWN;
1752 } else {
1753 ie->name_state = NAME_NOT_KNOWN;
1754 list_add(&ie->list, &cache->unknown);
1755 }
1756
1757update:
1758 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001759 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001760 ie->name_state = NAME_KNOWN;
1761 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 }
1763
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001764 memcpy(&ie->data, data, sizeof(*data));
1765 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001767
1768 if (ie->name_state == NAME_NOT_KNOWN)
1769 return false;
1770
1771 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772}
1773
1774static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775{
Johan Hedberg30883512012-01-04 14:16:21 +02001776 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 struct inquiry_info *info = (struct inquiry_info *) buf;
1778 struct inquiry_entry *e;
1779 int copied = 0;
1780
Johan Hedberg561aafb2012-01-04 13:31:59 +02001781 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001783
1784 if (copied >= num)
1785 break;
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 bacpy(&info->bdaddr, &data->bdaddr);
1788 info->pscan_rep_mode = data->pscan_rep_mode;
1789 info->pscan_period_mode = data->pscan_period_mode;
1790 info->pscan_mode = data->pscan_mode;
1791 memcpy(info->dev_class, data->dev_class, 3);
1792 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001795 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 }
1797
1798 BT_DBG("cache %p, copied %d", cache, copied);
1799 return copied;
1800}
1801
Johan Hedberg42c6b122013-03-05 20:37:49 +02001802static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
1804 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001805 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 struct hci_cp_inquiry cp;
1807
1808 BT_DBG("%s", hdev->name);
1809
1810 if (test_bit(HCI_INQUIRY, &hdev->flags))
1811 return;
1812
1813 /* Start Inquiry */
1814 memcpy(&cp.lap, &ir->lap, 3);
1815 cp.length = ir->length;
1816 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
Andre Guedes3e13fa12013-03-27 20:04:56 -03001820static int wait_inquiry(void *word)
1821{
1822 schedule();
1823 return signal_pending(current);
1824}
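/* wait_inquiry() is the action function handed to wait_on_bit() in
 * hci_inquiry() below: it keeps rescheduling while the HCI_INQUIRY
 * flag is set and returns non-zero to abort the wait as soon as a
 * signal is pending, which hci_inquiry() maps to -EINTR.
 */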
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 return -ENODEV;
1841
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
Johan Hedberg56f87902013-10-02 13:43:13 +03001852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001857 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001860 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 do_inquiry = 1;
1862 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001863 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Marcel Holtmann04837f62006-07-03 10:02:33 +02001865 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001866
1867 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001870 if (err < 0)
1871 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001872
1873 /* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001881 /* For an unlimited number of responses we use a buffer with
1882 * 255 entries.
1883 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
1887 * copy it to user space.
1888 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001890 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 err = -ENOMEM;
1892 goto done;
1893 }
1894
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001895 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001897 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001904 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001906 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
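/* Hypothetical user-space counterpart (a sketch under the assumption
 * of the usual BlueZ socket headers; not part of this file): the
 * function above services the HCIINQUIRY ioctl issued on a raw HCI
 * socket, roughly as follows.
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;			// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;		// force a fresh inquiry
 *	ir->lap[0]  = 0x33;			// GIAC 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// 8 * 1.28 seconds
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(sk, HCIINQUIRY, ir) < 0)
 *		perror("HCIINQUIRY");
 */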
1915
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001916static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 int ret = 0;
1919
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 BT_DBG("%s %p", hdev->name, hdev);
1921
1922 hci_req_lock(hdev);
1923
Johan Hovold94324962012-03-15 14:48:41 +01001924 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1925 ret = -ENODEV;
1926 goto done;
1927 }
1928
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001929 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1930 /* Check for rfkill but allow the HCI setup stage to
1931 * proceed (which in itself doesn't cause any RF activity).
1932 */
1933 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1934 ret = -ERFKILL;
1935 goto done;
1936 }
1937
1938 /* Check for valid public address or a configured static
1939 * random address, but let the HCI setup proceed to
1940 * be able to determine if there is a public address
1941 * or not.
1942 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001943 * In the case of user channel usage, it does not matter
1944 * whether a public address or static random address is
1945 * available.
1946 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001947 * This check is only valid for BR/EDR controllers
1948 * since AMP controllers do not have an address.
1949 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001950 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1951 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001952 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1953 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1954 ret = -EADDRNOTAVAIL;
1955 goto done;
1956 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001957 }
1958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 if (test_bit(HCI_UP, &hdev->flags)) {
1960 ret = -EALREADY;
1961 goto done;
1962 }
1963
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 if (hdev->open(hdev)) {
1965 ret = -EIO;
1966 goto done;
1967 }
1968
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001969 atomic_set(&hdev->cmd_cnt, 1);
1970 set_bit(HCI_INIT, &hdev->flags);
1971
1972 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1973 ret = hdev->setup(hdev);
1974
1975 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001976 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1977 set_bit(HCI_RAW, &hdev->flags);
1978
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001979 if (!test_bit(HCI_RAW, &hdev->flags) &&
1980 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001981 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 }
1983
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001984 clear_bit(HCI_INIT, &hdev->flags);
1985
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 if (!ret) {
1987 hci_dev_hold(hdev);
1988 set_bit(HCI_UP, &hdev->flags);
1989 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001990 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001991 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001992 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001993 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001994 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001995 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001996 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001997 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001999 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002000 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002001 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
2003 skb_queue_purge(&hdev->cmd_q);
2004 skb_queue_purge(&hdev->rx_q);
2005
2006 if (hdev->flush)
2007 hdev->flush(hdev);
2008
2009 if (hdev->sent_cmd) {
2010 kfree_skb(hdev->sent_cmd);
2011 hdev->sent_cmd = NULL;
2012 }
2013
2014 hdev->close(hdev);
2015 hdev->flags = 0;
2016 }
2017
2018done:
2019 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 return ret;
2021}
2022
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002023/* ---- HCI ioctl helpers ---- */
2024
2025int hci_dev_open(__u16 dev)
2026{
2027 struct hci_dev *hdev;
2028 int err;
2029
2030 hdev = hci_dev_get(dev);
2031 if (!hdev)
2032 return -ENODEV;
2033
Johan Hedberge1d08f42013-10-01 22:44:50 +03002034 /* We need to ensure that no other power on/off work is pending
2035 * before proceeding to call hci_dev_do_open. This is
2036 * particularly important if the setup procedure has not yet
2037 * completed.
2038 */
2039 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2040 cancel_delayed_work(&hdev->power_off);
2041
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002042 /* After this call it is guaranteed that the setup procedure
2043 * has finished. This means that error conditions like RFKILL
2044 * or no valid public or static random address apply.
2045 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002046 flush_workqueue(hdev->req_workqueue);
2047
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002048 err = hci_dev_do_open(hdev);
2049
2050 hci_dev_put(hdev);
2051
2052 return err;
2053}
2054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055static int hci_dev_do_close(struct hci_dev *hdev)
2056{
2057 BT_DBG("%s %p", hdev->name, hdev);
2058
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002059 cancel_delayed_work(&hdev->power_off);
2060
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 hci_req_cancel(hdev, ENODEV);
2062 hci_req_lock(hdev);
2063
2064 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002065 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 hci_req_unlock(hdev);
2067 return 0;
2068 }
2069
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002070 /* Flush RX and TX works */
2071 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002072 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002074 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002075 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002076 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002077 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002078 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002079 }
2080
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002081 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002082 cancel_delayed_work(&hdev->service_cache);
2083
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002084 cancel_delayed_work_sync(&hdev->le_scan_disable);
2085
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002086 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002087 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002089 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
2091 hci_notify(hdev, HCI_DEV_DOWN);
2092
2093 if (hdev->flush)
2094 hdev->flush(hdev);
2095
2096 /* Reset device */
2097 skb_queue_purge(&hdev->cmd_q);
2098 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002099 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002100 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002101 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002103 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 clear_bit(HCI_INIT, &hdev->flags);
2105 }
2106
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002107 /* flush cmd work */
2108 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
2110 /* Drop queues */
2111 skb_queue_purge(&hdev->rx_q);
2112 skb_queue_purge(&hdev->cmd_q);
2113 skb_queue_purge(&hdev->raw_q);
2114
2115 /* Drop last sent command */
2116 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002117 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 kfree_skb(hdev->sent_cmd);
2119 hdev->sent_cmd = NULL;
2120 }
2121
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002122 kfree_skb(hdev->recv_evt);
2123 hdev->recv_evt = NULL;
2124
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 /* After this point our queues are empty
2126 * and no tasks are scheduled. */
2127 hdev->close(hdev);
2128
Johan Hedberg35b973c2013-03-15 17:06:59 -05002129 /* Clear flags */
2130 hdev->flags = 0;
2131 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2132
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002133 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2134 if (hdev->dev_type == HCI_BREDR) {
2135 hci_dev_lock(hdev);
2136 mgmt_powered(hdev, 0);
2137 hci_dev_unlock(hdev);
2138 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002139 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002140
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002141 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002142 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002143
Johan Hedberge59fda82012-02-22 18:11:53 +02002144 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002145 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002146
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 hci_req_unlock(hdev);
2148
2149 hci_dev_put(hdev);
2150 return 0;
2151}
2152
2153int hci_dev_close(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int err;
2157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 hdev = hci_dev_get(dev);
2159 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002161
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 err = -EBUSY;
2164 goto done;
2165 }
2166
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002167 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 cancel_delayed_work(&hdev->power_off);
2169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002171
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002172done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 hci_dev_put(hdev);
2174 return err;
2175}
2176
2177int hci_dev_reset(__u16 dev)
2178{
2179 struct hci_dev *hdev;
2180 int ret = 0;
2181
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002182 hdev = hci_dev_get(dev);
2183 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 return -ENODEV;
2185
2186 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Marcel Holtmann808a0492013-08-26 20:57:58 -07002188 if (!test_bit(HCI_UP, &hdev->flags)) {
2189 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002193 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2194 ret = -EBUSY;
2195 goto done;
2196 }
2197
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 /* Drop queues */
2199 skb_queue_purge(&hdev->rx_q);
2200 skb_queue_purge(&hdev->cmd_q);
2201
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002202 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002203 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002205 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
2207 if (hdev->flush)
2208 hdev->flush(hdev);
2209
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002210 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002211 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002214 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
2216done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 hci_req_unlock(hdev);
2218 hci_dev_put(hdev);
2219 return ret;
2220}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002227 hdev = hci_dev_get(dev);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002238done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 return ret;
2241}
2242
2243int hci_dev_cmd(unsigned int cmd, void __user *arg)
2244{
2245 struct hci_dev *hdev;
2246 struct hci_dev_req dr;
2247 int err = 0;
2248
2249 if (copy_from_user(&dr, arg, sizeof(dr)))
2250 return -EFAULT;
2251
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002252 hdev = hci_dev_get(dr.dev_id);
2253 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 return -ENODEV;
2255
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002256 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257 err = -EBUSY;
2258 goto done;
2259 }
2260
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002261 if (hdev->dev_type != HCI_BREDR) {
2262 err = -EOPNOTSUPP;
2263 goto done;
2264 }
2265
Johan Hedberg56f87902013-10-02 13:43:13 +03002266 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2267 err = -EOPNOTSUPP;
2268 goto done;
2269 }
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 switch (cmd) {
2272 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002273 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2274 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 break;
2276
2277 case HCISETENCRYPT:
2278 if (!lmp_encrypt_capable(hdev)) {
2279 err = -EOPNOTSUPP;
2280 break;
2281 }
2282
2283 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2284 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002285 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2286 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 if (err)
2288 break;
2289 }
2290
Johan Hedberg01178cd2013-03-05 20:37:41 +02002291 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2292 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 break;
2294
2295 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002296 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2297 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 break;
2299
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002300 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002301 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2302 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002303 break;
2304
2305 case HCISETLINKMODE:
2306 hdev->link_mode = ((__u16) dr.dev_opt) &
2307 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2308 break;
2309
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 case HCISETPTYPE:
2311 hdev->pkt_type = (__u16) dr.dev_opt;
2312 break;
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002315 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2316 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 break;
2318
2319 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002320 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2321 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 break;
2323
2324 default:
2325 err = -EINVAL;
2326 break;
2327 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002328
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002329done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 hci_dev_put(hdev);
2331 return err;
2332}
2333
2334int hci_get_dev_list(void __user *arg)
2335{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002336 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 struct hci_dev_list_req *dl;
2338 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 int n = 0, size, err;
2340 __u16 dev_num;
2341
2342 if (get_user(dev_num, (__u16 __user *) arg))
2343 return -EFAULT;
2344
2345 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2346 return -EINVAL;
2347
2348 size = sizeof(*dl) + dev_num * sizeof(*dr);
2349
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002350 dl = kzalloc(size, GFP_KERNEL);
2351 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 return -ENOMEM;
2353
2354 dr = dl->dev_req;
2355
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002356 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002357 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002358 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002359 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002360
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002361 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2362 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 (dr + n)->dev_id = hdev->id;
2365 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002366
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 if (++n >= dev_num)
2368 break;
2369 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002370 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
2372 dl->dev_num = n;
2373 size = sizeof(*dl) + n * sizeof(*dr);
2374
2375 err = copy_to_user(arg, dl, size);
2376 kfree(dl);
2377
2378 return err ? -EFAULT : 0;
2379}
2380
2381int hci_get_dev_info(void __user *arg)
2382{
2383 struct hci_dev *hdev;
2384 struct hci_dev_info di;
2385 int err = 0;
2386
2387 if (copy_from_user(&di, arg, sizeof(di)))
2388 return -EFAULT;
2389
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002390 hdev = hci_dev_get(di.dev_id);
2391 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 return -ENODEV;
2393
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002394 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002395 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002396
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002397 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2398 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002399
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 strcpy(di.name, hdev->name);
2401 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002402 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 di.flags = hdev->flags;
2404 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002405 if (lmp_bredr_capable(hdev)) {
2406 di.acl_mtu = hdev->acl_mtu;
2407 di.acl_pkts = hdev->acl_pkts;
2408 di.sco_mtu = hdev->sco_mtu;
2409 di.sco_pkts = hdev->sco_pkts;
2410 } else {
2411 di.acl_mtu = hdev->le_mtu;
2412 di.acl_pkts = hdev->le_pkts;
2413 di.sco_mtu = 0;
2414 di.sco_pkts = 0;
2415 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 di.link_policy = hdev->link_policy;
2417 di.link_mode = hdev->link_mode;
2418
2419 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2420 memcpy(&di.features, &hdev->features, sizeof(di.features));
2421
2422 if (copy_to_user(arg, &di, sizeof(di)))
2423 err = -EFAULT;
2424
2425 hci_dev_put(hdev);
2426
2427 return err;
2428}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002432static int hci_rfkill_set_block(void *data, bool blocked)
2433{
2434 struct hci_dev *hdev = data;
2435
2436 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439 return -EBUSY;
2440
Johan Hedberg5e130362013-09-13 08:58:17 +03002441 if (blocked) {
2442 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002443 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002445 } else {
2446 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002447 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002448
2449 return 0;
2450}
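/* Summary of the rfkill interaction above: blocking the switch sets
 * HCI_RFKILLED and powers the device down right away, except while
 * the setup stage is still running, since setup itself causes no RF
 * activity (see the corresponding check in hci_dev_do_open).
 */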
2451
2452static const struct rfkill_ops hci_rfkill_ops = {
2453 .set_block = hci_rfkill_set_block,
2454};
2455
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002456static void hci_power_on(struct work_struct *work)
2457{
2458 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002459 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002460
2461 BT_DBG("%s", hdev->name);
2462
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002463 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002464 if (err < 0) {
2465 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002466 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002467 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002468
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002469 /* During the HCI setup phase, a few error conditions are
2470 * ignored and they need to be checked now. If they are still
2471 * valid, it is important to turn the device back off.
2472 */
2473 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2474 (hdev->dev_type == HCI_BREDR &&
2475 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2476 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002477 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2478 hci_dev_do_close(hdev);
2479 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002480 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2481 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002482 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002483
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002484 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002485 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002486}
2487
2488static void hci_power_off(struct work_struct *work)
2489{
Johan Hedberg32435532011-11-07 22:16:04 +02002490 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002491 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002492
2493 BT_DBG("%s", hdev->name);
2494
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002495 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002496}
2497
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002498static void hci_discov_off(struct work_struct *work)
2499{
2500 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002501
2502 hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504 BT_DBG("%s", hdev->name);
2505
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002506 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002507}
2508
Johan Hedberg35f74982014-02-18 17:14:32 +02002509void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002510{
Johan Hedberg48210022013-01-27 00:31:28 +02002511 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002512
Johan Hedberg48210022013-01-27 00:31:28 +02002513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002515 kfree(uuid);
2516 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002517}
2518
Johan Hedberg35f74982014-02-18 17:14:32 +02002519void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002531}
2532
Johan Hedberg35f74982014-02-18 17:14:32 +02002533void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002534{
2535 struct smp_ltk *k, *tmp;
2536
2537 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2538 list_del(&k->list);
2539 kfree(k);
2540 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002541}
2542
Johan Hedberg970c4e42014-02-18 10:19:33 +02002543void hci_smp_irks_clear(struct hci_dev *hdev)
2544{
2545 struct smp_irk *k, *tmp;
2546
2547 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2548 list_del(&k->list);
2549 kfree(k);
2550 }
2551}
2552
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002553struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2554{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002555 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002556
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002557 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002558 if (bacmp(bdaddr, &k->bdaddr) == 0)
2559 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002560
2561 return NULL;
2562}
2563
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302564static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002565 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002566{
2567 /* Legacy key */
2568 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302569 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002570
2571 /* Debug keys are insecure so don't store them persistently */
2572 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302573 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002574
2575 /* Changed combination key and there's no previous one */
2576 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302577 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002578
2579 /* Security mode 3 case */
2580 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302581 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002582
2583 /* Neither side requested no-bonding, i.e. both sides want bonding */
2584 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302585 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002586
2587 /* Local side had dedicated bonding as requirement */
2588 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302589 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002590
2591 /* Remote side had dedicated bonding as requirement */
2592 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302593 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002594
2595 /* If none of the above criteria match, then don't store the key
2596 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302597 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002598}
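/* Decision summary for hci_persistent_key() (derived from the checks
 * above):
 *
 *   legacy key (type < 0x03)                        -> store
 *   HCI_LK_DEBUG_COMBINATION                        -> don't store
 *   changed combination key, no previous key        -> don't store
 *   security mode 3 (no connection context)         -> store
 *   both sides asked for some form of bonding       -> store
 *   either side required dedicated bonding          -> store
 *   anything else                                   -> don't store
 */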
2599
Johan Hedberg98a0b842014-01-30 19:40:00 -08002600static bool ltk_type_master(u8 type)
2601{
2602 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2603 return true;
2604
2605 return false;
2606}
2607
2608struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2609 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002610{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002611 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002612
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002613 list_for_each_entry(k, &hdev->long_term_keys, list) {
2614 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002615 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002616 continue;
2617
Johan Hedberg98a0b842014-01-30 19:40:00 -08002618 if (ltk_type_master(k->type) != master)
2619 continue;
2620
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002621 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002622 }
2623
2624 return NULL;
2625}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002626
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002627struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002628 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002629{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002630 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002631
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002632 list_for_each_entry(k, &hdev->long_term_keys, list)
2633 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002634 bacmp(bdaddr, &k->bdaddr) == 0 &&
2635 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002636 return k;
2637
2638 return NULL;
2639}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002640
Johan Hedberg970c4e42014-02-18 10:19:33 +02002641struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2642{
2643 struct smp_irk *irk;
2644
2645 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2646 if (!bacmp(&irk->rpa, rpa))
2647 return irk;
2648 }
2649
2650 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2651 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2652 bacpy(&irk->rpa, rpa);
2653 return irk;
2654 }
2655 }
2656
2657 return NULL;
2658}
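/* The lookup above is deliberately two-pass: the first loop is a
 * cheap bacmp() against the RPA most recently matched for each IRK,
 * and only when that fails is the AES-based smp_irk_matches() run
 * against every stored IRK. A successful second-pass match caches
 * the new RPA so the next lookup takes the fast path.
 */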
2659
2660struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 addr_type)
2662{
2663 struct smp_irk *irk;
2664
2665 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2666 if (addr_type == irk->addr_type &&
2667 bacmp(bdaddr, &irk->bdaddr) == 0)
2668 return irk;
2669 }
2670
2671 return NULL;
2672}
2673
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002674int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002675 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002676{
2677 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302678 u8 old_key_type;
2679 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002680
2681 old_key = hci_find_link_key(hdev, bdaddr);
2682 if (old_key) {
2683 old_key_type = old_key->type;
2684 key = old_key;
2685 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002686 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002687 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2688 if (!key)
2689 return -ENOMEM;
2690 list_add(&key->list, &hdev->link_keys);
2691 }
2692
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002693 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002694
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002695 /* Some buggy controller combinations generate a changed
2696 * combination key for legacy pairing even when there's no
2697 * previous key */
2698 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002699 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002700 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002701 if (conn)
2702 conn->key_type = type;
2703 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002704
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002705 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002706 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002707 key->pin_len = pin_len;
2708
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002709 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002710 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002711 else
2712 key->type = type;
2713
Johan Hedberg4df378a2011-04-28 11:29:03 -07002714 if (!new_key)
2715 return 0;
2716
2717 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2718
Johan Hedberg744cf192011-11-08 20:40:14 +02002719 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002720
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302721 if (conn)
2722 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002723
2724 return 0;
2725}
2726
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002727int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002728 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002729 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002730{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002731 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002732 bool master = ltk_type_master(type);
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002733 u8 persistent;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002734
Johan Hedberg98a0b842014-01-30 19:40:00 -08002735 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002736 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002737 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002738 else {
2739 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002740 if (!key)
2741 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002742 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002743 }
2744
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002745 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002746 key->bdaddr_type = addr_type;
2747 memcpy(key->val, tk, sizeof(key->val));
2748 key->authenticated = authenticated;
2749 key->ediv = ediv;
2750 key->enc_size = enc_size;
2751 key->type = type;
2752 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002753
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002754 if (!new_key)
2755 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002756
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002757 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2758 persistent = 0;
2759 else
2760 persistent = 1;
2761
Johan Hedberg21b93b72014-01-30 19:39:58 -08002762 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002763 mgmt_new_ltk(hdev, key, persistent);
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002764
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002765 return 0;
2766}
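/* Note on the persistence rule above: a random address whose two most
 * significant bits are not 0b11 is a private (resolvable or
 * non-resolvable) address rather than a static one, so keys for it
 * are reported to mgmt as non-persistent.
 */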
2767
Johan Hedberg970c4e42014-02-18 10:19:33 +02002768int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2769 u8 val[16], bdaddr_t *rpa)
2770{
2771 struct smp_irk *irk;
2772
2773 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2774 if (!irk) {
2775 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2776 if (!irk)
2777 return -ENOMEM;
2778
2779 bacpy(&irk->bdaddr, bdaddr);
2780 irk->addr_type = addr_type;
2781
2782 list_add(&irk->list, &hdev->identity_resolving_keys);
2783 }
2784
2785 memcpy(irk->val, val, 16);
2786 bacpy(&irk->rpa, rpa);
2787
2788 return 0;
2789}
2790
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002791int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2792{
2793 struct link_key *key;
2794
2795 key = hci_find_link_key(hdev, bdaddr);
2796 if (!key)
2797 return -ENOENT;
2798
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002799 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002800
2801 list_del(&key->list);
2802 kfree(key);
2803
2804 return 0;
2805}
2806
Johan Hedberge0b2b272014-02-18 17:14:31 +02002807int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002808{
2809 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002810 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002811
2812 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002813 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002814 continue;
2815
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002816 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002817
2818 list_del(&k->list);
2819 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002820 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002821 }
2822
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002823 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002824}
2825
Ville Tervo6bd32322011-02-16 16:32:41 +02002826/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002827static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002828{
2829 struct hci_dev *hdev = (void *) arg;
2830
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002831 if (hdev->sent_cmd) {
2832 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2833 u16 opcode = __le16_to_cpu(sent->opcode);
2834
2835 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2836 } else {
2837 BT_ERR("%s command tx timeout", hdev->name);
2838 }
2839
Ville Tervo6bd32322011-02-16 16:32:41 +02002840 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002841 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002842}
2843
Szymon Janc2763eda2011-03-22 13:12:22 +01002844struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002845 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002846{
2847 struct oob_data *data;
2848
2849 list_for_each_entry(data, &hdev->remote_oob_data, list)
2850 if (bacmp(bdaddr, &data->bdaddr) == 0)
2851 return data;
2852
2853 return NULL;
2854}
2855
2856int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2857{
2858 struct oob_data *data;
2859
2860 data = hci_find_remote_oob_data(hdev, bdaddr);
2861 if (!data)
2862 return -ENOENT;
2863
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002864 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002865
2866 list_del(&data->list);
2867 kfree(data);
2868
2869 return 0;
2870}
2871
Johan Hedberg35f74982014-02-18 17:14:32 +02002872void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002873{
2874 struct oob_data *data, *n;
2875
2876 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2877 list_del(&data->list);
2878 kfree(data);
2879 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002880}
2881
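/* Store P-192 only OOB data for a remote device. Any previously
 * stored P-256 hash and randomizer values are cleared, since this
 * variant carries no 256-bit data.
 */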
Marcel Holtmann07988722014-01-10 02:07:29 -08002882int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2883 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002884{
2885 struct oob_data *data;
2886
2887 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002888 if (!data) {
Marcel Holtmann07988722014-01-10 02:07:29 -08002889 data = kmalloc(sizeof(*data), GFP_ATOMIC);
Szymon Janc2763eda2011-03-22 13:12:22 +01002890 if (!data)
2891 return -ENOMEM;
2892
2893 bacpy(&data->bdaddr, bdaddr);
2894 list_add(&data->list, &hdev->remote_oob_data);
2895 }
2896
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002897 memcpy(data->hash192, hash, sizeof(data->hash192));
2898 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002899
Marcel Holtmann07988722014-01-10 02:07:29 -08002900 memset(data->hash256, 0, sizeof(data->hash256));
2901 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2902
2903 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2904
2905 return 0;
2906}
2907
2908int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2909 u8 *hash192, u8 *randomizer192,
2910 u8 *hash256, u8 *randomizer256)
2911{
2912 struct oob_data *data;
2913
2914 data = hci_find_remote_oob_data(hdev, bdaddr);
2915 if (!data) {
2916 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2917 if (!data)
2918 return -ENOMEM;
2919
2920 bacpy(&data->bdaddr, bdaddr);
2921 list_add(&data->list, &hdev->remote_oob_data);
2922 }
2923
2924 memcpy(data->hash192, hash192, sizeof(data->hash192));
2925 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2926
2927 memcpy(data->hash256, hash256, sizeof(data->hash256));
2928 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2929
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002930 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002931
2932 return 0;
2933}
2934
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002935struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2936 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002937{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002938 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002939
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002940 list_for_each_entry(b, &hdev->blacklist, list) {
2941 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002942 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002943 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002944
2945 return NULL;
2946}
2947
Johan Hedberg35f74982014-02-18 17:14:32 +02002948void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002949{
2950 struct list_head *p, *n;
2951
2952 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002953 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002954
2955 list_del(p);
2956 kfree(b);
2957 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002958}
2959
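/* Add a device to the blacklist. BDADDR_ANY is rejected with -EBADF,
 * duplicate entries with -EEXIST; on success the management interface
 * is notified that the device is blocked.
 */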
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002960int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002961{
2962 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002963
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002964 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002965 return -EBADF;
2966
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002967 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002968 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002969
2970 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002971 if (!entry)
2972 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002973
2974 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002975 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002976
2977 list_add(&entry->list, &hdev->blacklist);
2978
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002979 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002980}
2981
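/* Remove a device from the blacklist. Passing BDADDR_ANY clears the
 * whole list instead of removing a single entry.
 */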
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002982int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002983{
2984 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002985
Johan Hedberg35f74982014-02-18 17:14:32 +02002986 if (!bacmp(bdaddr, BDADDR_ANY)) {
2987 hci_blacklist_clear(hdev);
2988 return 0;
2989 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002990
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002991 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002992 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002993 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002994
2995 list_del(&entry->list);
2996 kfree(entry);
2997
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002998 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002999}
3000
Andre Guedes15819a72014-02-03 13:56:18 -03003001/* This function requires the caller holds hdev->lock */
3002struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3003 bdaddr_t *addr, u8 addr_type)
3004{
3005 struct hci_conn_params *params;
3006
3007 list_for_each_entry(params, &hdev->le_conn_params, list) {
3008 if (bacmp(&params->addr, addr) == 0 &&
3009 params->addr_type == addr_type) {
3010 return params;
3011 }
3012 }
3013
3014 return NULL;
3015}
3016
3017/* This function requires the caller holds hdev->lock */
3018void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3019 u16 conn_min_interval, u16 conn_max_interval)
3020{
3021 struct hci_conn_params *params;
3022
3023 params = hci_conn_params_lookup(hdev, addr, addr_type);
3024 if (params) {
3025 params->conn_min_interval = conn_min_interval;
3026 params->conn_max_interval = conn_max_interval;
3027 return;
3028 }
3029
3030 params = kzalloc(sizeof(*params), GFP_KERNEL);
3031 if (!params) {
3032 BT_ERR("Out of memory");
3033 return;
3034 }
3035
3036 bacpy(&params->addr, addr);
3037 params->addr_type = addr_type;
3038 params->conn_min_interval = conn_min_interval;
3039 params->conn_max_interval = conn_max_interval;
3040
3041 list_add(&params->list, &hdev->le_conn_params);
3042
3043 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3044 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3045 conn_max_interval);
3046}
3047
3048/* This function requires the caller holds hdev->lock */
3049void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3050{
3051 struct hci_conn_params *params;
3052
3053 params = hci_conn_params_lookup(hdev, addr, addr_type);
3054 if (!params)
3055 return;
3056
3057 list_del(&params->list);
3058 kfree(params);
3059
3060 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3061}
3062
3063/* This function requires the caller holds hdev->lock */
3064void hci_conn_params_clear(struct hci_dev *hdev)
3065{
3066 struct hci_conn_params *params, *tmp;
3067
3068 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3069 list_del(&params->list);
3070 kfree(params);
3071 }
3072
3073 BT_DBG("All LE connection parameters were removed");
3074}
3075
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003076static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003077{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003078 if (status) {
3079 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003080
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003081 hci_dev_lock(hdev);
3082 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3083 hci_dev_unlock(hdev);
3084 return;
3085 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003086}
3087
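/* Called when the LE scan disable request completes. LE-only
 * discovery is stopped at this point; interleaved discovery
 * continues with a BR/EDR inquiry using the general inquiry
 * access code.
 */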
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003088static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003089{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003090 /* General inquiry access code (GIAC) */
3091 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3092 struct hci_request req;
3093 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003094 int err;
3095
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003096 if (status) {
3097 BT_ERR("Failed to disable LE scanning: status %d", status);
3098 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003099 }
3100
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003101 switch (hdev->discovery.type) {
3102 case DISCOV_TYPE_LE:
3103 hci_dev_lock(hdev);
3104 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3105 hci_dev_unlock(hdev);
3106 break;
3107
3108 case DISCOV_TYPE_INTERLEAVED:
3109 hci_req_init(&req, hdev);
3110
3111 memset(&cp, 0, sizeof(cp));
3112 memcpy(&cp.lap, lap, sizeof(cp.lap));
3113 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3114 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3115
3116 hci_dev_lock(hdev);
3117
3118 hci_inquiry_cache_flush(hdev);
3119
3120 err = hci_req_run(&req, inquiry_complete);
3121 if (err) {
3122 BT_ERR("Inquiry request failed: err %d", err);
3123 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3124 }
3125
3126 hci_dev_unlock(hdev);
3127 break;
3128 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003129}
3130
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003131static void le_scan_disable_work(struct work_struct *work)
3132{
3133 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003134 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003135 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003136 struct hci_request req;
3137 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003138
3139 BT_DBG("%s", hdev->name);
3140
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003141 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003142
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003143 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003144 cp.enable = LE_SCAN_DISABLE;
3145 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003146
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003147 err = hci_req_run(&req, le_scan_disable_work_complete);
3148 if (err)
3149 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003150}
3151
David Herrmann9be0dab2012-04-22 14:39:57 +02003152/* Alloc HCI device */
3153struct hci_dev *hci_alloc_dev(void)
3154{
3155 struct hci_dev *hdev;
3156
3157 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3158 if (!hdev)
3159 return NULL;
3160
David Herrmannb1b813d2012-04-22 14:39:58 +02003161 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3162 hdev->esco_type = (ESCO_HV1);
3163 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003164 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3165 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003166 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3167 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003168
David Herrmannb1b813d2012-04-22 14:39:58 +02003169 hdev->sniff_max_interval = 800;
3170 hdev->sniff_min_interval = 80;
3171
Marcel Holtmannbef64732013-10-11 08:23:19 -07003172 hdev->le_scan_interval = 0x0060;
3173 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003174 hdev->le_conn_min_interval = 0x0028;
3175 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003176
David Herrmannb1b813d2012-04-22 14:39:58 +02003177 mutex_init(&hdev->lock);
3178 mutex_init(&hdev->req_lock);
3179
3180 INIT_LIST_HEAD(&hdev->mgmt_pending);
3181 INIT_LIST_HEAD(&hdev->blacklist);
3182 INIT_LIST_HEAD(&hdev->uuids);
3183 INIT_LIST_HEAD(&hdev->link_keys);
3184 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003185 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003186 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003187 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003188 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003189
3190 INIT_WORK(&hdev->rx_work, hci_rx_work);
3191 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3192 INIT_WORK(&hdev->tx_work, hci_tx_work);
3193 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003194
David Herrmannb1b813d2012-04-22 14:39:58 +02003195 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3196 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3197 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3198
David Herrmannb1b813d2012-04-22 14:39:58 +02003199 skb_queue_head_init(&hdev->rx_q);
3200 skb_queue_head_init(&hdev->cmd_q);
3201 skb_queue_head_init(&hdev->raw_q);
3202
3203 init_waitqueue_head(&hdev->req_wait_q);
3204
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003205 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003206
David Herrmannb1b813d2012-04-22 14:39:58 +02003207 hci_init_sysfs(hdev);
3208 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003209
3210 return hdev;
3211}
3212EXPORT_SYMBOL(hci_alloc_dev);
3213
3214/* Free HCI device */
3215void hci_free_dev(struct hci_dev *hdev)
3216{
David Herrmann9be0dab2012-04-22 14:39:57 +02003217 /* will free via device release */
3218 put_device(&hdev->dev);
3219}
3220EXPORT_SYMBOL(hci_free_dev);
3221
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222/* Register HCI device */
3223int hci_register_dev(struct hci_dev *hdev)
3224{
David Herrmannb1b813d2012-04-22 14:39:58 +02003225 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226
David Herrmann010666a2012-01-07 15:47:07 +01003227 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 return -EINVAL;
3229
Mat Martineau08add512011-11-02 16:18:36 -07003230 /* Do not allow HCI_AMP devices to register at index 0,
3231 * so the index can be used as the AMP controller ID.
3232 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003233 switch (hdev->dev_type) {
3234 case HCI_BREDR:
3235 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3236 break;
3237 case HCI_AMP:
3238 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3239 break;
3240 default:
3241 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003243
Sasha Levin3df92b32012-05-27 22:36:56 +02003244 if (id < 0)
3245 return id;
3246
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 sprintf(hdev->name, "hci%d", id);
3248 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003249
3250 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3251
Kees Cookd8537542013-07-03 15:04:57 -07003252 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3253 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003254 if (!hdev->workqueue) {
3255 error = -ENOMEM;
3256 goto err;
3257 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003258
Kees Cookd8537542013-07-03 15:04:57 -07003259 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3260 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003261 if (!hdev->req_workqueue) {
3262 destroy_workqueue(hdev->workqueue);
3263 error = -ENOMEM;
3264 goto err;
3265 }
3266
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003267 if (!IS_ERR_OR_NULL(bt_debugfs))
3268 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3269
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003270 dev_set_name(&hdev->dev, "%s", hdev->name);
3271
Johan Hedberg99780a72014-02-18 10:40:07 +02003272 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3273 CRYPTO_ALG_ASYNC);
3274 if (IS_ERR(hdev->tfm_aes)) {
3275 BT_ERR("Unable to create crypto context");
3276 error = PTR_ERR(hdev->tfm_aes);
3277 hdev->tfm_aes = NULL;
3278 goto err_wqueue;
3279 }
3280
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003281 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003282 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003283 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003285 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003286 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3287 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003288 if (hdev->rfkill) {
3289 if (rfkill_register(hdev->rfkill) < 0) {
3290 rfkill_destroy(hdev->rfkill);
3291 hdev->rfkill = NULL;
3292 }
3293 }
3294
Johan Hedberg5e130362013-09-13 08:58:17 +03003295 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3296 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3297
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003298 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003299 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003300
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003301 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003302 /* Assume BR/EDR support until proven otherwise (such as
3303 * through reading supported features during init.
3304 */
3305 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3306 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003307
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003308 write_lock(&hci_dev_list_lock);
3309 list_add(&hdev->list, &hci_dev_list);
3310 write_unlock(&hci_dev_list_lock);
3311
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003313 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314
Johan Hedberg19202572013-01-14 22:33:51 +02003315 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003316
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003318
Johan Hedberg99780a72014-02-18 10:40:07 +02003319err_tfm:
3320 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003321err_wqueue:
3322 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003323 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003324err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003325 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003326
David Herrmann33ca9542011-10-08 14:58:49 +02003327 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328}
3329EXPORT_SYMBOL(hci_register_dev);
3330
3331/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003332void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333{
Sasha Levin3df92b32012-05-27 22:36:56 +02003334 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003335
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003336 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337
Johan Hovold94324962012-03-15 14:48:41 +01003338 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3339
Sasha Levin3df92b32012-05-27 22:36:56 +02003340 id = hdev->id;
3341
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003342 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003344 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345
3346 hci_dev_do_close(hdev);
3347
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303348 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003349 kfree_skb(hdev->reassembly[i]);
3350
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003351 cancel_work_sync(&hdev->power_on);
3352
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003353 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003354 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003355 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003356 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003357 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003358 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003359
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003360 /* mgmt_index_removed should take care of emptying the
3361 * pending list */
3362 BUG_ON(!list_empty(&hdev->mgmt_pending));
3363
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 hci_notify(hdev, HCI_DEV_UNREG);
3365
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003366 if (hdev->rfkill) {
3367 rfkill_unregister(hdev->rfkill);
3368 rfkill_destroy(hdev->rfkill);
3369 }
3370
Johan Hedberg99780a72014-02-18 10:40:07 +02003371 if (hdev->tfm_aes)
3372 crypto_free_blkcipher(hdev->tfm_aes);
3373
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003374 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003375
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003376 debugfs_remove_recursive(hdev->debugfs);
3377
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003378 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003379 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003380
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003381 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003382 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003383 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003384 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003385 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003386 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003387 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003388 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003389 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003390
David Herrmanndc946bd2012-01-07 15:47:24 +01003391 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003392
3393 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394}
3395EXPORT_SYMBOL(hci_unregister_dev);
3396
3397/* Suspend HCI device */
3398int hci_suspend_dev(struct hci_dev *hdev)
3399{
3400 hci_notify(hdev, HCI_DEV_SUSPEND);
3401 return 0;
3402}
3403EXPORT_SYMBOL(hci_suspend_dev);
3404
3405/* Resume HCI device */
3406int hci_resume_dev(struct hci_dev *hdev)
3407{
3408 hci_notify(hdev, HCI_DEV_RESUME);
3409 return 0;
3410}
3411EXPORT_SYMBOL(hci_resume_dev);
3412
Marcel Holtmann76bca882009-11-18 00:40:39 +01003413/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003414int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003415{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003416 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003417 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003418 kfree_skb(skb);
3419 return -ENXIO;
3420 }
3421
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003422 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003423 bt_cb(skb)->incoming = 1;
3424
3425 /* Time stamp */
3426 __net_timestamp(skb);
3427
Marcel Holtmann76bca882009-11-18 00:40:39 +01003428 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003429 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003430
Marcel Holtmann76bca882009-11-18 00:40:39 +01003431 return 0;
3432}
3433EXPORT_SYMBOL(hci_recv_frame);
3434
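/* Reassemble streamed driver data into complete HCI frames. Bytes are
 * accumulated in hdev->reassembly[index] until the expected length,
 * taken from the packet header, has been received; complete frames
 * are handed to hci_recv_frame(). Returns the number of unconsumed
 * bytes, or a negative error code.
 */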
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303435static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003436 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303437{
3438 int len = 0;
3439 int hlen = 0;
3440 int remain = count;
3441 struct sk_buff *skb;
3442 struct bt_skb_cb *scb;
3443
3444 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003445 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303446 return -EILSEQ;
3447
3448 skb = hdev->reassembly[index];
3449
3450 if (!skb) {
3451 switch (type) {
3452 case HCI_ACLDATA_PKT:
3453 len = HCI_MAX_FRAME_SIZE;
3454 hlen = HCI_ACL_HDR_SIZE;
3455 break;
3456 case HCI_EVENT_PKT:
3457 len = HCI_MAX_EVENT_SIZE;
3458 hlen = HCI_EVENT_HDR_SIZE;
3459 break;
3460 case HCI_SCODATA_PKT:
3461 len = HCI_MAX_SCO_SIZE;
3462 hlen = HCI_SCO_HDR_SIZE;
3463 break;
3464 }
3465
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003466 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303467 if (!skb)
3468 return -ENOMEM;
3469
3470 scb = (void *) skb->cb;
3471 scb->expect = hlen;
3472 scb->pkt_type = type;
3473
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303474 hdev->reassembly[index] = skb;
3475 }
3476
3477 while (count) {
3478 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003479 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303480
3481 memcpy(skb_put(skb, len), data, len);
3482
3483 count -= len;
3484 data += len;
3485 scb->expect -= len;
3486 remain = count;
3487
3488 switch (type) {
3489 case HCI_EVENT_PKT:
3490 if (skb->len == HCI_EVENT_HDR_SIZE) {
3491 struct hci_event_hdr *h = hci_event_hdr(skb);
3492 scb->expect = h->plen;
3493
3494 if (skb_tailroom(skb) < scb->expect) {
3495 kfree_skb(skb);
3496 hdev->reassembly[index] = NULL;
3497 return -ENOMEM;
3498 }
3499 }
3500 break;
3501
3502 case HCI_ACLDATA_PKT:
3503 if (skb->len == HCI_ACL_HDR_SIZE) {
3504 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3505 scb->expect = __le16_to_cpu(h->dlen);
3506
3507 if (skb_tailroom(skb) < scb->expect) {
3508 kfree_skb(skb);
3509 hdev->reassembly[index] = NULL;
3510 return -ENOMEM;
3511 }
3512 }
3513 break;
3514
3515 case HCI_SCODATA_PKT:
3516 if (skb->len == HCI_SCO_HDR_SIZE) {
3517 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3518 scb->expect = h->dlen;
3519
3520 if (skb_tailroom(skb) < scb->expect) {
3521 kfree_skb(skb);
3522 hdev->reassembly[index] = NULL;
3523 return -ENOMEM;
3524 }
3525 }
3526 break;
3527 }
3528
3529 if (scb->expect == 0) {
3530 /* Complete frame */
3531
3532 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003533 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303534
3535 hdev->reassembly[index] = NULL;
3536 return remain;
3537 }
3538 }
3539
3540 return remain;
3541}
3542
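/* Feed raw bytes of a known packet type from the driver into the
 * per-type reassembly buffer (index type - 1). Returns the number of
 * bytes left unconsumed, or a negative error code for invalid types
 * or allocation failures.
 */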
Marcel Holtmannef222012007-07-11 06:42:04 +02003543int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3544{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303545 int rem = 0;
3546
Marcel Holtmannef222012007-07-11 06:42:04 +02003547 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3548 return -EILSEQ;
3549
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003550 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003551 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303552 if (rem < 0)
3553 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003554
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303555 data += (count - rem);
3556 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003557 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003558
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303559 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003560}
3561EXPORT_SYMBOL(hci_recv_fragment);
3562
Suraj Sumangala99811512010-07-14 13:02:19 +05303563#define STREAM_REASSEMBLY 0
3564
3565int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3566{
3567 int type;
3568 int rem = 0;
3569
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003570 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303571 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3572
3573 if (!skb) {
3574 struct { char type; } *pkt;
3575
3576 /* Start of the frame */
3577 pkt = data;
3578 type = pkt->type;
3579
3580 data++;
3581 count--;
3582 } else
3583 type = bt_cb(skb)->pkt_type;
3584
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003585 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003586 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303587 if (rem < 0)
3588 return rem;
3589
3590 data += (count - rem);
3591 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003592 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303593
3594 return rem;
3595}
3596EXPORT_SYMBOL(hci_recv_stream_fragment);
3597
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598/* ---- Interface to upper protocols ---- */
3599
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600int hci_register_cb(struct hci_cb *cb)
3601{
3602 BT_DBG("%p name %s", cb, cb->name);
3603
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003604 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003606 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
3608 return 0;
3609}
3610EXPORT_SYMBOL(hci_register_cb);
3611
3612int hci_unregister_cb(struct hci_cb *cb)
3613{
3614 BT_DBG("%p name %s", cb, cb->name);
3615
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003616 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003618 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
3620 return 0;
3621}
3622EXPORT_SYMBOL(hci_unregister_cb);
3623
Marcel Holtmann51086992013-10-10 14:54:19 -07003624static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003626 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003628 /* Time stamp */
3629 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003631 /* Send copy to monitor */
3632 hci_send_to_monitor(hdev, skb);
3633
3634 if (atomic_read(&hdev->promisc)) {
3635 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003636 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 }
3638
3639	/* Get rid of skb owner prior to sending to the driver. */
3640 skb_orphan(skb);
3641
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003642 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003643 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644}
3645
Johan Hedberg3119ae92013-03-05 20:37:44 +02003646void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3647{
3648 skb_queue_head_init(&req->cmd_q);
3649 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003650 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003651}
3652
3653int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3654{
3655 struct hci_dev *hdev = req->hdev;
3656 struct sk_buff *skb;
3657 unsigned long flags;
3658
3659 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3660
Andre Guedes5d73e032013-03-08 11:20:16 -03003661	/* If an error occurred during request building, remove all HCI
3662 * commands queued on the HCI request queue.
3663 */
3664 if (req->err) {
3665 skb_queue_purge(&req->cmd_q);
3666 return req->err;
3667 }
3668
Johan Hedberg3119ae92013-03-05 20:37:44 +02003669 /* Do not allow empty requests */
3670 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003671 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003672
3673 skb = skb_peek_tail(&req->cmd_q);
3674 bt_cb(skb)->req.complete = complete;
3675
3676 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3677 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3678 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3679
3680 queue_work(hdev->workqueue, &hdev->cmd_work);
3681
3682 return 0;
3683}
3684
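/* Typical request usage, as seen in le_scan_disable_work() above
 * (illustrative sketch only; scan_disable_complete is a hypothetical
 * callback name, not defined in this file):
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, scan_disable_complete);
 */
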
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003685static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003686 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687{
3688 int len = HCI_COMMAND_HDR_SIZE + plen;
3689 struct hci_command_hdr *hdr;
3690 struct sk_buff *skb;
3691
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003693 if (!skb)
3694 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
3696 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003697 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698 hdr->plen = plen;
3699
3700 if (plen)
3701 memcpy(skb_put(skb, plen), param, plen);
3702
3703 BT_DBG("skb len %d", skb->len);
3704
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003705 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003706
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003707 return skb;
3708}
3709
3710/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003711int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3712 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003713{
3714 struct sk_buff *skb;
3715
3716 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3717
3718 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3719 if (!skb) {
3720 BT_ERR("%s no memory for command", hdev->name);
3721 return -ENOMEM;
3722 }
3723
Johan Hedberg11714b32013-03-05 20:37:47 +02003724	/* Stand-alone HCI commands must be flagged as
3725 * single-command requests.
3726 */
3727 bt_cb(skb)->req.start = true;
3728
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003730 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731
3732 return 0;
3733}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734
Johan Hedberg71c76a12013-03-05 20:37:46 +02003735/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003736void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3737 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003738{
3739 struct hci_dev *hdev = req->hdev;
3740 struct sk_buff *skb;
3741
3742 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3743
Andre Guedes34739c12013-03-08 11:20:18 -03003744	/* If an error occurred during request building, there is no point in
3745 * queueing the HCI command. We can simply return.
3746 */
3747 if (req->err)
3748 return;
3749
Johan Hedberg71c76a12013-03-05 20:37:46 +02003750 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3751 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003752 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3753 hdev->name, opcode);
3754 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003755 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003756 }
3757
3758 if (skb_queue_empty(&req->cmd_q))
3759 bt_cb(skb)->req.start = true;
3760
Johan Hedberg02350a72013-04-03 21:50:29 +03003761 bt_cb(skb)->req.event = event;
3762
Johan Hedberg71c76a12013-03-05 20:37:46 +02003763 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003764}
3765
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003766void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3767 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003768{
3769 hci_req_add_ev(req, opcode, plen, param, 0);
3770}
3771
Linus Torvalds1da177e2005-04-16 15:20:36 -07003772/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003773void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774{
3775 struct hci_command_hdr *hdr;
3776
3777 if (!hdev->sent_cmd)
3778 return NULL;
3779
3780 hdr = (void *) hdev->sent_cmd->data;
3781
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003782 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 return NULL;
3784
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003785 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786
3787 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3788}
3789
3790/* Send ACL data */
3791static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3792{
3793 struct hci_acl_hdr *hdr;
3794 int len = skb->len;
3795
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003796 skb_push(skb, HCI_ACL_HDR_SIZE);
3797 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003798 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003799 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3800 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801}
3802
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003803static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003804 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003806 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807 struct hci_dev *hdev = conn->hdev;
3808 struct sk_buff *list;
3809
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003810 skb->len = skb_headlen(skb);
3811 skb->data_len = 0;
3812
3813 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003814
3815 switch (hdev->dev_type) {
3816 case HCI_BREDR:
3817 hci_add_acl_hdr(skb, conn->handle, flags);
3818 break;
3819 case HCI_AMP:
3820 hci_add_acl_hdr(skb, chan->handle, flags);
3821 break;
3822 default:
3823 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3824 return;
3825 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003826
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003827 list = skb_shinfo(skb)->frag_list;
3828 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829	/* Non-fragmented */
3830 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3831
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003832 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 } else {
3834 /* Fragmented */
3835 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3836
3837 skb_shinfo(skb)->frag_list = NULL;
3838
3839 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003840 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003842 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003843
3844 flags &= ~ACL_START;
3845 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 do {
3847 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003848
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003849 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003850 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851
3852 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3853
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003854 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 } while (list);
3856
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003857 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003859}
3860
3861void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3862{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003863 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003864
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003865 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003866
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003867 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003868
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003869 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871
3872/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003873void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874{
3875 struct hci_dev *hdev = conn->hdev;
3876 struct hci_sco_hdr hdr;
3877
3878 BT_DBG("%s len %d", hdev->name, skb->len);
3879
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003880 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 hdr.dlen = skb->len;
3882
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003883 skb_push(skb, HCI_SCO_HDR_SIZE);
3884 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003885 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003887 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003888
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003890 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892
3893/* ---- HCI TX task (outgoing data) ---- */
3894
3895/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003896static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3897 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898{
3899 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003900 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003901 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003903	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003905
3906 rcu_read_lock();
3907
3908 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003909 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003911
3912 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3913 continue;
3914
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 num++;
3916
3917 if (c->sent < min) {
3918 min = c->sent;
3919 conn = c;
3920 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003921
3922 if (hci_conn_num(hdev, type) == num)
3923 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 }
3925
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003926 rcu_read_unlock();
3927
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003929 int cnt, q;
3930
3931 switch (conn->type) {
3932 case ACL_LINK:
3933 cnt = hdev->acl_cnt;
3934 break;
3935 case SCO_LINK:
3936 case ESCO_LINK:
3937 cnt = hdev->sco_cnt;
3938 break;
3939 case LE_LINK:
3940 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3941 break;
3942 default:
3943 cnt = 0;
3944 BT_ERR("Unknown link type");
3945 }
3946
3947 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 *quote = q ? q : 1;
3949 } else
3950 *quote = 0;
3951
3952 BT_DBG("conn %p quote %d", conn, *quote);
3953 return conn;
3954}
3955
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003956static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957{
3958 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003959 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960
Ville Tervobae1f5d92011-02-10 22:38:53 -03003961 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003963 rcu_read_lock();
3964
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003966 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003967 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003968 BT_ERR("%s killing stalled connection %pMR",
3969 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003970 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 }
3972 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003973
3974 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975}
3976
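/* Choose the next channel to service for the given link type: among
 * the channels whose queued data has the highest priority, pick the
 * one whose connection has the fewest packets in flight, and derive
 * a transmit quota from the available controller buffer credits.
 */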
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003977static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3978 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003979{
3980 struct hci_conn_hash *h = &hdev->conn_hash;
3981 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003982 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003983 struct hci_conn *conn;
3984 int cnt, q, conn_num = 0;
3985
3986 BT_DBG("%s", hdev->name);
3987
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003988 rcu_read_lock();
3989
3990 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003991 struct hci_chan *tmp;
3992
3993 if (conn->type != type)
3994 continue;
3995
3996 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3997 continue;
3998
3999 conn_num++;
4000
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004001 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004002 struct sk_buff *skb;
4003
4004 if (skb_queue_empty(&tmp->data_q))
4005 continue;
4006
4007 skb = skb_peek(&tmp->data_q);
4008 if (skb->priority < cur_prio)
4009 continue;
4010
4011 if (skb->priority > cur_prio) {
4012 num = 0;
4013 min = ~0;
4014 cur_prio = skb->priority;
4015 }
4016
4017 num++;
4018
4019 if (conn->sent < min) {
4020 min = conn->sent;
4021 chan = tmp;
4022 }
4023 }
4024
4025 if (hci_conn_num(hdev, type) == conn_num)
4026 break;
4027 }
4028
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004029 rcu_read_unlock();
4030
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004031 if (!chan)
4032 return NULL;
4033
4034 switch (chan->conn->type) {
4035 case ACL_LINK:
4036 cnt = hdev->acl_cnt;
4037 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004038 case AMP_LINK:
4039 cnt = hdev->block_cnt;
4040 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004041 case SCO_LINK:
4042 case ESCO_LINK:
4043 cnt = hdev->sco_cnt;
4044 break;
4045 case LE_LINK:
4046 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4047 break;
4048 default:
4049 cnt = 0;
4050 BT_ERR("Unknown link type");
4051 }
4052
4053 q = cnt / num;
4054 *quote = q ? q : 1;
4055 BT_DBG("chan %p quote %d", chan, *quote);
4056 return chan;
4057}
4058
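/* Reset the sent counter of channels that transmitted in the last
 * round, and promote the head skb of channels that were left waiting
 * towards HCI_PRIO_MAX - 1 so that low-priority traffic does not
 * starve.
 */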
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004059static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4060{
4061 struct hci_conn_hash *h = &hdev->conn_hash;
4062 struct hci_conn *conn;
4063 int num = 0;
4064
4065 BT_DBG("%s", hdev->name);
4066
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004067 rcu_read_lock();
4068
4069 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004070 struct hci_chan *chan;
4071
4072 if (conn->type != type)
4073 continue;
4074
4075 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4076 continue;
4077
4078 num++;
4079
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004080 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004081 struct sk_buff *skb;
4082
4083 if (chan->sent) {
4084 chan->sent = 0;
4085 continue;
4086 }
4087
4088 if (skb_queue_empty(&chan->data_q))
4089 continue;
4090
4091 skb = skb_peek(&chan->data_q);
4092 if (skb->priority >= HCI_PRIO_MAX - 1)
4093 continue;
4094
4095 skb->priority = HCI_PRIO_MAX - 1;
4096
4097 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004098 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004099 }
4100
4101 if (hci_conn_num(hdev, type) == num)
4102 break;
4103 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004104
4105 rcu_read_unlock();
4106
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004107}
4108
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004109static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4110{
4111 /* Calculate count of blocks used by this packet */
4112 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4113}
4114
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004115static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117 if (!test_bit(HCI_RAW, &hdev->flags)) {
4118 /* ACL tx timeout must be longer than maximum
4119 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004120 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004121 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004122 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004124}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004126static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004127{
4128 unsigned int cnt = hdev->acl_cnt;
4129 struct hci_chan *chan;
4130 struct sk_buff *skb;
4131 int quote;
4132
4133 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004134
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004135 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004136 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004137 u32 priority = (skb_peek(&chan->data_q))->priority;
4138 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004139 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004140 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004141
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004142 /* Stop if priority has changed */
4143 if (skb->priority < priority)
4144 break;
4145
4146 skb = skb_dequeue(&chan->data_q);
4147
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004148 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004149 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004150
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004151 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152 hdev->acl_last_tx = jiffies;
4153
4154 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004155 chan->sent++;
4156 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 }
4158 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004159
4160 if (cnt != hdev->acl_cnt)
4161 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162}
4163
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

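/* Top-level ACL scheduler: bails out early when the controller type has
 * no matching link to serve (no ACL links on a BR/EDR controller, no
 * AMP links on an AMP controller), then dispatches to the packet-based
 * or block-based variant according to the flow control mode in use.
 */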
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

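/* Schedule eSCO. Same draining loop as plain SCO above: eSCO shares the
 * sco_cnt credit pool, and only the link type used for the connection
 * lookup differs.
 */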
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

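/* Schedule LE. When the controller advertises no dedicated LE buffers
 * (le_pkts == 0), LE traffic is accounted against the shared ACL
 * credits instead, which is why acl_cnt may be written back below.
 */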
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than the maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

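/* TX work item: runs each scheduler in turn and then flushes the raw
 * queue. In user channel mode the schedulers are skipped and only raw
 * packets are sent, since a userspace stack drives the device directly.
 */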
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

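/* A request is a sequence of commands queued together on cmd_q; the
 * first command of each request is tagged with bt_cb(skb)->req.start.
 * The current request is complete once the head of the queue starts a
 * new request (or the queue is empty).
 */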
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

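/* Requeue a clone of the last sent command (except HCI_OP_RESET) at the
 * head of cmd_q and kick the command work. Used to recover from
 * controllers (e.g. some CSR parts) that emit a spontaneous reset
 * complete event during init, which would otherwise leave the pending
 * command unanswered.
 */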
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

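/* Completion handling for the command identified by opcode/status,
 * taking multi-command requests into account: on failure, all remaining
 * commands belonging to the same request are flushed from cmd_q before
 * the request's complete callback runs, while commands of a following
 * request (tagged req.start) stay queued. For example, in a
 * three-command request A->B->C where B fails with a non-zero status, C
 * is dropped from cmd_q and the complete callback it carries is invoked
 * with B's status.
 */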
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

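/* RX work item: drains rx_q, mirroring every packet to the monitor
 * socket and, in promiscuous mode, to HCI sockets, before
 * demultiplexing events, ACL data and SCO data to their handlers.
 */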
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

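/* Command work item: sends the next queued command whenever the
 * controller has a free command credit (cmd_cnt), keeping a clone in
 * sent_cmd so the eventual response event can be matched against it,
 * and arming cmd_timer to catch controllers that never answer.
 */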
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}