blob: b25a36c3064b7f7f2e1e41be41664425585b80b4 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
Marcel Holtmannb78752c2010-08-08 23:06:53 -040040static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020041static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020042static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
Sasha Levin3df92b32012-05-27 22:36:56 +020052/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055/* ---- HCI notifications ---- */
56
/* Forward a device state change notification to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070064static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
76static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120}
121
122static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127};
128
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
Marcel Holtmann47219832013-10-17 17:24:15 -0700192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700199 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700200
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700207
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700208 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
Marcel Holtmann041000b2013-10-17 12:02:31 -0700315static int voice_setting_get(void *data, u64 *val)
316{
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324}
325
326DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
Marcel Holtmann06f5b772013-10-19 07:09:11 -0700354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700475 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
495static int sniff_min_interval_set(void *data, u64 val)
496{
497 struct hci_dev *hdev = data;
498
499 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
500 return -EINVAL;
501
502 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700503 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700504 hci_dev_unlock(hdev);
505
506 return 0;
507}
508
509static int sniff_min_interval_get(void *data, u64 *val)
510{
511 struct hci_dev *hdev = data;
512
513 hci_dev_lock(hdev);
514 *val = hdev->sniff_min_interval;
515 hci_dev_unlock(hdev);
516
517 return 0;
518}
519
520DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
521 sniff_min_interval_set, "%llu\n");
522
523static int sniff_max_interval_set(void *data, u64 val)
524{
525 struct hci_dev *hdev = data;
526
527 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
528 return -EINVAL;
529
530 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700531 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700532 hci_dev_unlock(hdev);
533
534 return 0;
535}
536
537static int sniff_max_interval_get(void *data, u64 *val)
538{
539 struct hci_dev *hdev = data;
540
541 hci_dev_lock(hdev);
542 *val = hdev->sniff_max_interval;
543 hci_dev_unlock(hdev);
544
545 return 0;
546}
547
548DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
549 sniff_max_interval_set, "%llu\n");
550
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700551static int static_address_show(struct seq_file *f, void *p)
552{
553 struct hci_dev *hdev = f->private;
554
555 hci_dev_lock(hdev);
556 seq_printf(f, "%pMR\n", &hdev->static_addr);
557 hci_dev_unlock(hdev);
558
559 return 0;
560}
561
562static int static_address_open(struct inode *inode, struct file *file)
563{
564 return single_open(file, static_address_show, inode->i_private);
565}
566
567static const struct file_operations static_address_fops = {
568 .open = static_address_open,
569 .read = seq_read,
570 .llseek = seq_lseek,
571 .release = single_release,
572};
573
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800574static ssize_t force_static_address_read(struct file *file,
575 char __user *user_buf,
576 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700577{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800578 struct hci_dev *hdev = file->private_data;
579 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700580
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800581 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
582 buf[1] = '\n';
583 buf[2] = '\0';
584 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
585}
586
587static ssize_t force_static_address_write(struct file *file,
588 const char __user *user_buf,
589 size_t count, loff_t *ppos)
590{
591 struct hci_dev *hdev = file->private_data;
592 char buf[32];
593 size_t buf_size = min(count, (sizeof(buf)-1));
594 bool enable;
595
596 if (test_bit(HCI_UP, &hdev->flags))
597 return -EBUSY;
598
599 if (copy_from_user(buf, user_buf, buf_size))
600 return -EFAULT;
601
602 buf[buf_size] = '\0';
603 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700604 return -EINVAL;
605
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800606 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
607 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700608
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800609 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
610
611 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700612}
613
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800614static const struct file_operations force_static_address_fops = {
615 .open = simple_open,
616 .read = force_static_address_read,
617 .write = force_static_address_write,
618 .llseek = default_llseek,
619};
Marcel Holtmann92202182013-10-18 16:38:10 -0700620
Marcel Holtmann3698d702014-02-18 21:54:49 -0800621static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
622{
623 struct hci_dev *hdev = f->private;
624 struct list_head *p, *n;
625
626 hci_dev_lock(hdev);
627 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
628 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
629 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
630 &irk->bdaddr, irk->addr_type,
631 16, irk->val, &irk->rpa);
632 }
633 hci_dev_unlock(hdev);
634
635 return 0;
636}
637
638static int identity_resolving_keys_open(struct inode *inode, struct file *file)
639{
640 return single_open(file, identity_resolving_keys_show,
641 inode->i_private);
642}
643
644static const struct file_operations identity_resolving_keys_fops = {
645 .open = identity_resolving_keys_open,
646 .read = seq_read,
647 .llseek = seq_lseek,
648 .release = single_release,
649};
650
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700651static int long_term_keys_show(struct seq_file *f, void *ptr)
652{
653 struct hci_dev *hdev = f->private;
654 struct list_head *p, *n;
655
656 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800657 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700658 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800659 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700660 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
661 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
662 8, ltk->rand, 16, ltk->val);
663 }
664 hci_dev_unlock(hdev);
665
666 return 0;
667}
668
669static int long_term_keys_open(struct inode *inode, struct file *file)
670{
671 return single_open(file, long_term_keys_show, inode->i_private);
672}
673
674static const struct file_operations long_term_keys_fops = {
675 .open = long_term_keys_open,
676 .read = seq_read,
677 .llseek = seq_lseek,
678 .release = single_release,
679};
680
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700681static int conn_min_interval_set(void *data, u64 val)
682{
683 struct hci_dev *hdev = data;
684
685 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
686 return -EINVAL;
687
688 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700689 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700690 hci_dev_unlock(hdev);
691
692 return 0;
693}
694
695static int conn_min_interval_get(void *data, u64 *val)
696{
697 struct hci_dev *hdev = data;
698
699 hci_dev_lock(hdev);
700 *val = hdev->le_conn_min_interval;
701 hci_dev_unlock(hdev);
702
703 return 0;
704}
705
706DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
707 conn_min_interval_set, "%llu\n");
708
709static int conn_max_interval_set(void *data, u64 val)
710{
711 struct hci_dev *hdev = data;
712
713 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
714 return -EINVAL;
715
716 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700717 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700718 hci_dev_unlock(hdev);
719
720 return 0;
721}
722
723static int conn_max_interval_get(void *data, u64 *val)
724{
725 struct hci_dev *hdev = data;
726
727 hci_dev_lock(hdev);
728 *val = hdev->le_conn_max_interval;
729 hci_dev_unlock(hdev);
730
731 return 0;
732}
733
734DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
735 conn_max_interval_set, "%llu\n");
736
Jukka Rissanen89863102013-12-11 17:05:38 +0200737static ssize_t lowpan_read(struct file *file, char __user *user_buf,
738 size_t count, loff_t *ppos)
739{
740 struct hci_dev *hdev = file->private_data;
741 char buf[3];
742
743 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
744 buf[1] = '\n';
745 buf[2] = '\0';
746 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
747}
748
749static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
750 size_t count, loff_t *position)
751{
752 struct hci_dev *hdev = fp->private_data;
753 bool enable;
754 char buf[32];
755 size_t buf_size = min(count, (sizeof(buf)-1));
756
757 if (copy_from_user(buf, user_buffer, buf_size))
758 return -EFAULT;
759
760 buf[buf_size] = '\0';
761
762 if (strtobool(buf, &enable) < 0)
763 return -EINVAL;
764
765 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
766 return -EALREADY;
767
768 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
769
770 return count;
771}
772
773static const struct file_operations lowpan_debugfs_fops = {
774 .open = simple_open,
775 .read = lowpan_read,
776 .write = lowpan_write,
777 .llseek = default_llseek,
778};
779
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780/* ---- HCI requests ---- */
781
Johan Hedberg42c6b122013-03-05 20:37:49 +0200782static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200784 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785
786 if (hdev->req_status == HCI_REQ_PEND) {
787 hdev->req_result = result;
788 hdev->req_status = HCI_REQ_DONE;
789 wake_up_interruptible(&hdev->req_wait_q);
790 }
791}
792
793static void hci_req_cancel(struct hci_dev *hdev, int err)
794{
795 BT_DBG("%s err 0x%2.2x", hdev->name, err);
796
797 if (hdev->req_status == HCI_REQ_PEND) {
798 hdev->req_result = err;
799 hdev->req_status = HCI_REQ_CANCELED;
800 wake_up_interruptible(&hdev->req_wait_q);
801 }
802}
803
/* Retrieve the last received HCI event stashed in hdev->recv_evt and hand
 * it to the caller if it matches: when @event is non-zero, only the event
 * code has to match; otherwise the event must be a Command Complete whose
 * opcode matches @opcode. Returns the skb (ownership transferred) or
 * ERR_PTR(-ENODATA) when no suitable event is available.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Take ownership of the stashed event; clear the slot under the
	 * device lock so a concurrent receive path cannot see a stale skb.
	 */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: the code alone decides. */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
858
/* Send a single HCI command and sleep until the matching completion
 * event arrives or @timeout expires. When @event is non-zero, completion
 * is signalled by that event instead of Command Complete. Returns the
 * response skb (caller frees) or an ERR_PTR. Must be called with the
 * request lock held (serialized with other synchronous requests).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Mark the request pending before running it so the completion
	 * callback observes HCI_REQ_PEND.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status; map it to -errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
912
913struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300914 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300915{
916 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300917}
918EXPORT_SYMBOL(__hci_cmd_sync);
919
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Builds a request via @func, runs it, and sleeps until the completion
 * callback fires or @timeout expires. Returns 0 on success or a negative
 * errno. Caller must hold the request lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Mark pending before running so the completion callback sees it. */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status; map it to -errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
983
Johan Hedberg01178cd2013-03-05 20:37:41 +0200984static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200985 void (*req)(struct hci_request *req,
986 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200987 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988{
989 int ret;
990
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200991 if (!test_bit(HCI_UP, &hdev->flags))
992 return -ENETDOWN;
993
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994 /* Serialize all requests */
995 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200996 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 hci_req_unlock(hdev);
998
999 return ret;
1000}
1001
Johan Hedberg42c6b122013-03-05 20:37:49 +02001002static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001004 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
1006 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001007 set_bit(HCI_RESET, &req->hdev->flags);
1008 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009}
1010
/* Stage-1 init for BR/EDR controllers: select packet-based flow control
 * and queue the basic identity queries (features, version, address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1024
/* Stage-1 init for AMP controllers: select block-based flow control and
 * queue the AMP-specific capability queries.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1050
/* First stage of controller initialization: optionally reset the
 * controller, then dispatch to the type-specific init sequence.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1075
/* Stage-2 BR/EDR setup: queue the standard queries and defaults that
 * every BR/EDR capable controller needs after stage-1 init.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1117
/* Stage-2 LE setup: queue the LE capability queries and, for LE-only
 * controllers, mark LE as implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1141
1142static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1143{
1144 if (lmp_ext_inq_capable(hdev))
1145 return 0x02;
1146
1147 if (lmp_inq_rssi_capable(hdev))
1148 return 0x01;
1149
1150 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1151 hdev->lmp_subver == 0x0757)
1152 return 0x01;
1153
1154 if (hdev->manufacturer == 15) {
1155 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1156 return 0x01;
1157 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1158 return 0x01;
1159 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1160 return 0x01;
1161 }
1162
1163 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1164 hdev->lmp_subver == 0x1805)
1165 return 0x01;
1166
1167 return 0x00;
1168}
1169
Johan Hedberg42c6b122013-03-05 20:37:49 +02001170static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001171{
1172 u8 mode;
1173
Johan Hedberg42c6b122013-03-05 20:37:49 +02001174 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001175
Johan Hedberg42c6b122013-03-05 20:37:49 +02001176 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001177}
1178
/* Build and queue the HCI event mask (and LE event mask) tailored to
 * the controller's capabilities, starting from a conservative default
 * and enabling extra events per supported LMP feature.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE capable controllers additionally get the LE event mask */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1259
/* Second stage of controller initialization: per-transport setup
 * (BR/EDR, LE), event mask programming and feature-dependent commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP supported but disabled: clear the EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1323
Johan Hedberg42c6b122013-03-05 20:37:49 +02001324static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001325{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001326 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001327 struct hci_cp_write_def_link_policy cp;
1328 u16 link_policy = 0;
1329
1330 if (lmp_rswitch_capable(hdev))
1331 link_policy |= HCI_LP_RSWITCH;
1332 if (lmp_hold_capable(hdev))
1333 link_policy |= HCI_LP_HOLD;
1334 if (lmp_sniff_capable(hdev))
1335 link_policy |= HCI_LP_SNIFF;
1336 if (lmp_park_capable(hdev))
1337 link_policy |= HCI_LP_PARK;
1338
1339 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001340 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001341}
1342
/* Sync the controller's LE Host Supported setting with the current
 * HCI_LE_ENABLED flag, sending the command only when they differ.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only issue the command when the host setting needs to change */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1363
/* Build and queue event mask page 2, enabling CSB master/slave and
 * Authenticated Payload Timeout events per controller capability.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1395
/* Third stage of controller initialization: stored-link-key cleanup,
 * default link policy, LE address configuration and extended feature
 * page reads.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by default
		 * use that one. If this is a LE only controller without
		 * a public address, default to the random address.
		 *
		 * For debugging purposes it is possible to force
		 * controllers with a public address to use the
		 * random address instead.
		 */
		if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		else
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1454
/* Fourth stage of controller initialization: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1476
/* Run the full multi-stage controller initialization and, during the
 * initial HCI_SETUP phase only, create the debugfs entries matching the
 * controller's capabilities.
 *
 * Returns 0 on success or the first negative error from a stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing specific entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode specific entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1589
Johan Hedberg42c6b122013-03-05 20:37:49 +02001590static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
1592 __u8 scan = opt;
1593
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595
1596 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001597 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598}
1599
Johan Hedberg42c6b122013-03-05 20:37:49 +02001600static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601{
1602 __u8 auth = opt;
1603
Johan Hedberg42c6b122013-03-05 20:37:49 +02001604 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
1606 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
1609
Johan Hedberg42c6b122013-03-05 20:37:49 +02001610static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611{
1612 __u8 encrypt = opt;
1613
Johan Hedberg42c6b122013-03-05 20:37:49 +02001614 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001616 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001617 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618}
1619
Johan Hedberg42c6b122013-03-05 20:37:49 +02001620static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001621{
1622 __le16 policy = cpu_to_le16(opt);
1623
Johan Hedberg42c6b122013-03-05 20:37:49 +02001624 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001625
1626 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001627 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001628}
1629
/* Get HCI device by index.
 * Device is held on return (the caller must drop the reference with
 * hci_dev_put()). Returns NULL for a negative index or when no device
 * with that id is registered.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Take the reference while holding the list lock so the device
	 * cannot disappear between lookup and hold.
	 */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
1652/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001653
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001654bool hci_discovery_active(struct hci_dev *hdev)
1655{
1656 struct discovery_state *discov = &hdev->discovery;
1657
Andre Guedes6fbe1952012-02-03 17:47:58 -03001658 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001659 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001660 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001661 return true;
1662
Andre Guedes6fbe1952012-02-03 17:47:58 -03001663 default:
1664 return false;
1665 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001666}
1667
Johan Hedbergff9ef572012-01-04 14:23:45 +02001668void hci_discovery_set_state(struct hci_dev *hdev, int state)
1669{
1670 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1671
1672 if (hdev->discovery.state == state)
1673 return;
1674
1675 switch (state) {
1676 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001677 if (hdev->discovery.state != DISCOVERY_STARTING)
1678 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001679 break;
1680 case DISCOVERY_STARTING:
1681 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001682 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001683 mgmt_discovering(hdev, 1);
1684 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001685 case DISCOVERY_RESOLVING:
1686 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001687 case DISCOVERY_STOPPING:
1688 break;
1689 }
1690
1691 hdev->discovery.state = state;
1692}
1693
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001694void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695{
Johan Hedberg30883512012-01-04 14:16:21 +02001696 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001697 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698
Johan Hedberg561aafb2012-01-04 13:31:59 +02001699 list_for_each_entry_safe(p, n, &cache->all, all) {
1700 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001701 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001703
1704 INIT_LIST_HEAD(&cache->unknown);
1705 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706}
1707
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001708struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1709 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710{
Johan Hedberg30883512012-01-04 14:16:21 +02001711 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 struct inquiry_entry *e;
1713
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001714 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
Johan Hedberg561aafb2012-01-04 13:31:59 +02001716 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001718 return e;
1719 }
1720
1721 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722}
1723
Johan Hedberg561aafb2012-01-04 13:31:59 +02001724struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001725 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001726{
Johan Hedberg30883512012-01-04 14:16:21 +02001727 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001728 struct inquiry_entry *e;
1729
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001730 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001731
1732 list_for_each_entry(e, &cache->unknown, list) {
1733 if (!bacmp(&e->data.bdaddr, bdaddr))
1734 return e;
1735 }
1736
1737 return NULL;
1738}
1739
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001740struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001741 bdaddr_t *bdaddr,
1742 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001743{
1744 struct discovery_state *cache = &hdev->discovery;
1745 struct inquiry_entry *e;
1746
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001747 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001748
1749 list_for_each_entry(e, &cache->resolve, list) {
1750 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1751 return e;
1752 if (!bacmp(&e->data.bdaddr, bdaddr))
1753 return e;
1754 }
1755
1756 return NULL;
1757}
1758
Johan Hedberga3d4e202012-01-09 00:53:02 +02001759void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001760 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001761{
1762 struct discovery_state *cache = &hdev->discovery;
1763 struct list_head *pos = &cache->resolve;
1764 struct inquiry_entry *p;
1765
1766 list_del(&ie->list);
1767
1768 list_for_each_entry(p, &cache->resolve, list) {
1769 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001770 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001771 break;
1772 pos = &p->list;
1773 }
1774
1775 list_add(&ie->list, pos);
1776}
1777
Johan Hedberg31754052012-01-04 13:39:52 +02001778bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001779 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780{
Johan Hedberg30883512012-01-04 14:16:21 +02001781 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001782 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001784 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785
Szymon Janc2b2fec42012-11-20 11:38:54 +01001786 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1787
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001788 if (ssp)
1789 *ssp = data->ssp_mode;
1790
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001791 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001792 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001793 if (ie->data.ssp_mode && ssp)
1794 *ssp = true;
1795
Johan Hedberga3d4e202012-01-09 00:53:02 +02001796 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001797 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001798 ie->data.rssi = data->rssi;
1799 hci_inquiry_cache_update_resolve(hdev, ie);
1800 }
1801
Johan Hedberg561aafb2012-01-04 13:31:59 +02001802 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001803 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001804
Johan Hedberg561aafb2012-01-04 13:31:59 +02001805 /* Entry not in the cache. Add new one. */
1806 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1807 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001808 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001809
1810 list_add(&ie->all, &cache->all);
1811
1812 if (name_known) {
1813 ie->name_state = NAME_KNOWN;
1814 } else {
1815 ie->name_state = NAME_NOT_KNOWN;
1816 list_add(&ie->list, &cache->unknown);
1817 }
1818
1819update:
1820 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001821 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001822 ie->name_state = NAME_KNOWN;
1823 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 }
1825
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001826 memcpy(&ie->data, data, sizeof(*data));
1827 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001829
1830 if (ie->name_state == NAME_NOT_KNOWN)
1831 return false;
1832
1833 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834}
1835
1836static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1837{
Johan Hedberg30883512012-01-04 14:16:21 +02001838 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 struct inquiry_info *info = (struct inquiry_info *) buf;
1840 struct inquiry_entry *e;
1841 int copied = 0;
1842
Johan Hedberg561aafb2012-01-04 13:31:59 +02001843 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001845
1846 if (copied >= num)
1847 break;
1848
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 bacpy(&info->bdaddr, &data->bdaddr);
1850 info->pscan_rep_mode = data->pscan_rep_mode;
1851 info->pscan_period_mode = data->pscan_period_mode;
1852 info->pscan_mode = data->pscan_mode;
1853 memcpy(info->dev_class, data->dev_class, 3);
1854 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001857 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 }
1859
1860 BT_DBG("cache %p, copied %d", cache, copied);
1861 return copied;
1862}
1863
Johan Hedberg42c6b122013-03-05 20:37:49 +02001864static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865{
1866 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001867 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 struct hci_cp_inquiry cp;
1869
1870 BT_DBG("%s", hdev->name);
1871
1872 if (test_bit(HCI_INQUIRY, &hdev->flags))
1873 return;
1874
1875 /* Start Inquiry */
1876 memcpy(&cp.lap, &ir->lap, 3);
1877 cp.length = ir->length;
1878 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001879 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880}
1881
Andre Guedes3e13fa12013-03-27 20:04:56 -03001882static int wait_inquiry(void *word)
1883{
1884 schedule();
1885 return signal_pending(current);
1886}
1887
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888int hci_inquiry(void __user *arg)
1889{
1890 __u8 __user *ptr = arg;
1891 struct hci_inquiry_req ir;
1892 struct hci_dev *hdev;
1893 int err = 0, do_inquiry = 0, max_rsp;
1894 long timeo;
1895 __u8 *buf;
1896
1897 if (copy_from_user(&ir, ptr, sizeof(ir)))
1898 return -EFAULT;
1899
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001900 hdev = hci_dev_get(ir.dev_id);
1901 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 return -ENODEV;
1903
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001904 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1905 err = -EBUSY;
1906 goto done;
1907 }
1908
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001909 if (hdev->dev_type != HCI_BREDR) {
1910 err = -EOPNOTSUPP;
1911 goto done;
1912 }
1913
Johan Hedberg56f87902013-10-02 13:43:13 +03001914 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1915 err = -EOPNOTSUPP;
1916 goto done;
1917 }
1918
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001919 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001920 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001921 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001922 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 do_inquiry = 1;
1924 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001925 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
Marcel Holtmann04837f62006-07-03 10:02:33 +02001927 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001928
1929 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001930 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1931 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001932 if (err < 0)
1933 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001934
1935 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1936 * cleared). If it is interrupted by a signal, return -EINTR.
1937 */
1938 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1939 TASK_INTERRUPTIBLE))
1940 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001941 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001943 /* for unlimited number of responses we will use buffer with
1944 * 255 entries
1945 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1947
1948 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1949 * copy it to the user space.
1950 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001951 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001952 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 err = -ENOMEM;
1954 goto done;
1955 }
1956
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001957 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001959 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 BT_DBG("num_rsp %d", ir.num_rsp);
1962
1963 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1964 ptr += sizeof(ir);
1965 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001966 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001968 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 err = -EFAULT;
1970
1971 kfree(buf);
1972
1973done:
1974 hci_dev_put(hdev);
1975 return err;
1976}
1977
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001978static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 int ret = 0;
1981
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 BT_DBG("%s %p", hdev->name, hdev);
1983
1984 hci_req_lock(hdev);
1985
Johan Hovold94324962012-03-15 14:48:41 +01001986 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1987 ret = -ENODEV;
1988 goto done;
1989 }
1990
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001991 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1992 /* Check for rfkill but allow the HCI setup stage to
1993 * proceed (which in itself doesn't cause any RF activity).
1994 */
1995 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1996 ret = -ERFKILL;
1997 goto done;
1998 }
1999
2000 /* Check for valid public address or a configured static
2001 * random adddress, but let the HCI setup proceed to
2002 * be able to determine if there is a public address
2003 * or not.
2004 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002005 * In case of user channel usage, it is not important
2006 * if a public address or static random address is
2007 * available.
2008 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002009 * This check is only valid for BR/EDR controllers
2010 * since AMP controllers do not have an address.
2011 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002012 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2013 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002014 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2015 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2016 ret = -EADDRNOTAVAIL;
2017 goto done;
2018 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002019 }
2020
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 if (test_bit(HCI_UP, &hdev->flags)) {
2022 ret = -EALREADY;
2023 goto done;
2024 }
2025
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 if (hdev->open(hdev)) {
2027 ret = -EIO;
2028 goto done;
2029 }
2030
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002031 atomic_set(&hdev->cmd_cnt, 1);
2032 set_bit(HCI_INIT, &hdev->flags);
2033
2034 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2035 ret = hdev->setup(hdev);
2036
2037 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002038 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2039 set_bit(HCI_RAW, &hdev->flags);
2040
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002041 if (!test_bit(HCI_RAW, &hdev->flags) &&
2042 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002043 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 }
2045
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002046 clear_bit(HCI_INIT, &hdev->flags);
2047
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 if (!ret) {
2049 hci_dev_hold(hdev);
2050 set_bit(HCI_UP, &hdev->flags);
2051 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002052 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002053 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002054 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002055 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002056 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002057 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002058 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002059 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002061 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002062 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002063 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
2065 skb_queue_purge(&hdev->cmd_q);
2066 skb_queue_purge(&hdev->rx_q);
2067
2068 if (hdev->flush)
2069 hdev->flush(hdev);
2070
2071 if (hdev->sent_cmd) {
2072 kfree_skb(hdev->sent_cmd);
2073 hdev->sent_cmd = NULL;
2074 }
2075
2076 hdev->close(hdev);
2077 hdev->flags = 0;
2078 }
2079
2080done:
2081 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 return ret;
2083}
2084
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002085/* ---- HCI ioctl helpers ---- */
2086
/* HCIDEVUP ioctl entry point: resolve the device by index, make sure
 * no setup/power work races with us, then run the actual open path.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117static int hci_dev_do_close(struct hci_dev *hdev)
2118{
2119 BT_DBG("%s %p", hdev->name, hdev);
2120
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002121 cancel_delayed_work(&hdev->power_off);
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 hci_req_cancel(hdev, ENODEV);
2124 hci_req_lock(hdev);
2125
2126 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002127 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 hci_req_unlock(hdev);
2129 return 0;
2130 }
2131
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002132 /* Flush RX and TX works */
2133 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002134 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002136 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002137 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002138 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002139 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002140 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002141 }
2142
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002143 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002144 cancel_delayed_work(&hdev->service_cache);
2145
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002146 cancel_delayed_work_sync(&hdev->le_scan_disable);
2147
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002148 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002149 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002151 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152
2153 hci_notify(hdev, HCI_DEV_DOWN);
2154
2155 if (hdev->flush)
2156 hdev->flush(hdev);
2157
2158 /* Reset device */
2159 skb_queue_purge(&hdev->cmd_q);
2160 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002161 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002162 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002163 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002165 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 clear_bit(HCI_INIT, &hdev->flags);
2167 }
2168
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002169 /* flush cmd work */
2170 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172 /* Drop queues */
2173 skb_queue_purge(&hdev->rx_q);
2174 skb_queue_purge(&hdev->cmd_q);
2175 skb_queue_purge(&hdev->raw_q);
2176
2177 /* Drop last sent command */
2178 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002179 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 kfree_skb(hdev->sent_cmd);
2181 hdev->sent_cmd = NULL;
2182 }
2183
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002184 kfree_skb(hdev->recv_evt);
2185 hdev->recv_evt = NULL;
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 /* After this point our queues are empty
2188 * and no tasks are scheduled. */
2189 hdev->close(hdev);
2190
Johan Hedberg35b973c2013-03-15 17:06:59 -05002191 /* Clear flags */
2192 hdev->flags = 0;
2193 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2194
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002195 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2196 if (hdev->dev_type == HCI_BREDR) {
2197 hci_dev_lock(hdev);
2198 mgmt_powered(hdev, 0);
2199 hci_dev_unlock(hdev);
2200 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002201 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002202
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002203 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002204 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002205
Johan Hedberge59fda82012-02-22 18:11:53 +02002206 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002207 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002208
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 hci_req_unlock(hdev);
2210
2211 hci_dev_put(hdev);
2212 return 0;
2213}
2214
2215int hci_dev_close(__u16 dev)
2216{
2217 struct hci_dev *hdev;
2218 int err;
2219
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002220 hdev = hci_dev_get(dev);
2221 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002223
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002224 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2225 err = -EBUSY;
2226 goto done;
2227 }
2228
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002229 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2230 cancel_delayed_work(&hdev->power_off);
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002233
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002234done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 hci_dev_put(hdev);
2236 return err;
2237}
2238
/* HCIDEVRESET ioctl: drop all queued traffic and cached state for a
 * running device, then (for non-raw devices) issue an HCI reset.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Resetting a powered-down device makes no sense. */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control accounting back to its initial state. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2283
2284int hci_dev_reset_stat(__u16 dev)
2285{
2286 struct hci_dev *hdev;
2287 int ret = 0;
2288
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002289 hdev = hci_dev_get(dev);
2290 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 return -ENODEV;
2292
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002293 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2294 ret = -EBUSY;
2295 goto done;
2296 }
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2299
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002300done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 return ret;
2303}
2304
/* Handler for the classic HCISET* ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU). @arg points to a userspace
 * struct hci_dev_req. These are BR/EDR-only controls. Returns 0 on
 * success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values: the low
	 * word is the packet count, the high word the MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2395
/* HCIGETDEVLIST ioctl: copy (id, flags) pairs for up to the requested
 * number of registered devices back to userspace. @arg points to a
 * struct hci_dev_list_req whose first __u16 is the capacity.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Touching the device cancels any pending auto-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users get pairable by default. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2442
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * device id supplied by userspace and copy it back.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV).
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace interest counts as activity: cancel pending auto-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) users expect pairable behavior */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble and dev_type in the next one */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers have no ACL/SCO buffers; report the LE
	 * buffer information in the ACL fields instead.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2491
2492/* ---- Interface to HCI drivers ---- */
2493
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002494static int hci_rfkill_set_block(void *data, bool blocked)
2495{
2496 struct hci_dev *hdev = data;
2497
2498 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2499
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002500 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2501 return -EBUSY;
2502
Johan Hedberg5e130362013-09-13 08:58:17 +03002503 if (blocked) {
2504 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002505 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2506 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002507 } else {
2508 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002509 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002510
2511 return 0;
2512}
2513
/* rfkill operations for HCI devices; only block/unblock is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2517
/* Deferred power-on work: open the device and handle conditions that
 * were deliberately ignored while HCI_SETUP was in progress.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* rfkilled, or a BR/EDR device with no usable address:
		 * drop auto-off (we are closing anyway) and power down.
		 */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless something shows
		 * interest in the device before the timeout fires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-on completes setup: announce the index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2549
2550static void hci_power_off(struct work_struct *work)
2551{
Johan Hedberg32435532011-11-07 22:16:04 +02002552 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002553 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002554
2555 BT_DBG("%s", hdev->name);
2556
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002557 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002558}
2559
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002560static void hci_discov_off(struct work_struct *work)
2561{
2562 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002563
2564 hdev = container_of(work, struct hci_dev, discov_off.work);
2565
2566 BT_DBG("%s", hdev->name);
2567
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002568 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002569}
2570
Johan Hedberg35f74982014-02-18 17:14:32 +02002571void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002572{
Johan Hedberg48210022013-01-27 00:31:28 +02002573 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002574
Johan Hedberg48210022013-01-27 00:31:28 +02002575 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2576 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002577 kfree(uuid);
2578 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002579}
2580
Johan Hedberg35f74982014-02-18 17:14:32 +02002581void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002582{
2583 struct list_head *p, *n;
2584
2585 list_for_each_safe(p, n, &hdev->link_keys) {
2586 struct link_key *key;
2587
2588 key = list_entry(p, struct link_key, list);
2589
2590 list_del(p);
2591 kfree(key);
2592 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002593}
2594
Johan Hedberg35f74982014-02-18 17:14:32 +02002595void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002596{
2597 struct smp_ltk *k, *tmp;
2598
2599 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2600 list_del(&k->list);
2601 kfree(k);
2602 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002603}
2604
Johan Hedberg970c4e42014-02-18 10:19:33 +02002605void hci_smp_irks_clear(struct hci_dev *hdev)
2606{
2607 struct smp_irk *k, *tmp;
2608
2609 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2610 list_del(&k->list);
2611 kfree(k);
2612 }
2613}
2614
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002615struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2616{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002617 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002618
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002619 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002620 if (bacmp(bdaddr, &k->bdaddr) == 0)
2621 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002622
2623 return NULL;
2624}
2625
/* Decide whether a newly created link key should be stored
 * persistently. The checks are ordered: earlier rules take precedence
 * over later ones. old_key_type == 0xff means no previous key existed.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2661
Johan Hedberg98a0b842014-01-30 19:40:00 -08002662static bool ltk_type_master(u8 type)
2663{
2664 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2665 return true;
2666
2667 return false;
2668}
2669
2670struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2671 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002672{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002673 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002674
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002675 list_for_each_entry(k, &hdev->long_term_keys, list) {
2676 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002677 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002678 continue;
2679
Johan Hedberg98a0b842014-01-30 19:40:00 -08002680 if (ltk_type_master(k->type) != master)
2681 continue;
2682
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002683 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002684 }
2685
2686 return NULL;
2687}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002688
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002689struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002690 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002691{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002692 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002693
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002694 list_for_each_entry(k, &hdev->long_term_keys, list)
2695 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002696 bacmp(bdaddr, &k->bdaddr) == 0 &&
2697 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002698 return k;
2699
2700 return NULL;
2701}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002702
/* Resolve a Resolvable Private Address to its IRK, if any.
 *
 * First pass is a cheap lookup against RPAs that were already resolved
 * and cached. Only if that fails does the second pass run the AES-based
 * resolution against every stored IRK; on a match the RPA is cached in
 * the IRK entry so future lookups hit the fast path.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2721
2722struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2723 u8 addr_type)
2724{
2725 struct smp_irk *irk;
2726
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002727 /* Identity Address must be public or static random */
2728 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2729 return NULL;
2730
Johan Hedberg970c4e42014-02-18 10:19:33 +02002731 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2732 if (addr_type == irk->addr_type &&
2733 bacmp(bdaddr, &irk->bdaddr) == 0)
2734 return irk;
2735 }
2736
2737 return NULL;
2738}
2739
/* Store (or update) a BR/EDR link key for the given remote address.
 *
 * @conn:    optional connection the key was created on (may be NULL,
 *           e.g. security mode 3)
 * @new_key: non-zero when the controller reported this as a new key,
 *           in which case mgmt is notified
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse the existing entry when one is already stored */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2792
Johan Hedbergca9142b2014-02-19 14:57:44 +02002793struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002794 u8 addr_type, u8 type, u8 authenticated,
2795 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002796{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002797 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002798 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002799
Johan Hedberg98a0b842014-01-30 19:40:00 -08002800 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002801 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002802 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002803 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002804 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002805 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002806 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002807 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002808 }
2809
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002810 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002811 key->bdaddr_type = addr_type;
2812 memcpy(key->val, tk, sizeof(key->val));
2813 key->authenticated = authenticated;
2814 key->ediv = ediv;
2815 key->enc_size = enc_size;
2816 key->type = type;
2817 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002818
Johan Hedbergca9142b2014-02-19 14:57:44 +02002819 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002820}
2821
/* Store (or update) an identity resolving key for the given identity
 * address, remembering the last RPA it resolved. Returns the stored
 * entry or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	/* Always refresh key material and cached RPA, even on reuse */
	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2844
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002845int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2846{
2847 struct link_key *key;
2848
2849 key = hci_find_link_key(hdev, bdaddr);
2850 if (!key)
2851 return -ENOENT;
2852
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002853 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002854
2855 list_del(&key->list);
2856 kfree(key);
2857
2858 return 0;
2859}
2860
Johan Hedberge0b2b272014-02-18 17:14:31 +02002861int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002862{
2863 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002864 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002865
2866 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002867 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002868 continue;
2869
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002870 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002871
2872 list_del(&k->list);
2873 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002874 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002875 }
2876
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002877 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002878}
2879
Johan Hedberga7ec7332014-02-18 17:14:35 +02002880void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2881{
2882 struct smp_irk *k, *tmp;
2883
2884 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2885 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2886 continue;
2887
2888 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2889
2890 list_del(&k->list);
2891 kfree(k);
2892 }
2893}
2894
/* HCI command timer function */
/* Runs in timer (softirq) context when the controller failed to answer
 * a command in time: log the stuck opcode (if the sent command is still
 * around) and kick the command queue so processing can continue.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow the next command through and reschedule the cmd worker */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2912
Szymon Janc2763eda2011-03-22 13:12:22 +01002913struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002914 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002915{
2916 struct oob_data *data;
2917
2918 list_for_each_entry(data, &hdev->remote_oob_data, list)
2919 if (bacmp(bdaddr, &data->bdaddr) == 0)
2920 return data;
2921
2922 return NULL;
2923}
2924
2925int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2926{
2927 struct oob_data *data;
2928
2929 data = hci_find_remote_oob_data(hdev, bdaddr);
2930 if (!data)
2931 return -ENOENT;
2932
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002933 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002934
2935 list_del(&data->list);
2936 kfree(data);
2937
2938 return 0;
2939}
2940
Johan Hedberg35f74982014-02-18 17:14:32 +02002941void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002942{
2943 struct oob_data *data, *n;
2944
2945 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2946 list_del(&data->list);
2947 kfree(data);
2948 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002949}
2950
Marcel Holtmann07988722014-01-10 02:07:29 -08002951int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2952 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002953{
2954 struct oob_data *data;
2955
2956 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002957 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002958 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002959 if (!data)
2960 return -ENOMEM;
2961
2962 bacpy(&data->bdaddr, bdaddr);
2963 list_add(&data->list, &hdev->remote_oob_data);
2964 }
2965
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002966 memcpy(data->hash192, hash, sizeof(data->hash192));
2967 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002968
Marcel Holtmann07988722014-01-10 02:07:29 -08002969 memset(data->hash256, 0, sizeof(data->hash256));
2970 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2971
2972 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2973
2974 return 0;
2975}
2976
2977int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2978 u8 *hash192, u8 *randomizer192,
2979 u8 *hash256, u8 *randomizer256)
2980{
2981 struct oob_data *data;
2982
2983 data = hci_find_remote_oob_data(hdev, bdaddr);
2984 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002985 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08002986 if (!data)
2987 return -ENOMEM;
2988
2989 bacpy(&data->bdaddr, bdaddr);
2990 list_add(&data->list, &hdev->remote_oob_data);
2991 }
2992
2993 memcpy(data->hash192, hash192, sizeof(data->hash192));
2994 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2995
2996 memcpy(data->hash256, hash256, sizeof(data->hash256));
2997 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2998
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002999 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003000
3001 return 0;
3002}
3003
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003004struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3005 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003006{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003007 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003008
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003009 list_for_each_entry(b, &hdev->blacklist, list) {
3010 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003011 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003012 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003013
3014 return NULL;
3015}
3016
Johan Hedberg35f74982014-02-18 17:14:32 +02003017void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003018{
3019 struct list_head *p, *n;
3020
3021 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003022 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003023
3024 list_del(p);
3025 kfree(b);
3026 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003027}
3028
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003029int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003030{
3031 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003032
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003033 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003034 return -EBADF;
3035
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003036 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003037 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003038
3039 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003040 if (!entry)
3041 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003042
3043 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003044 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003045
3046 list_add(&entry->list, &hdev->blacklist);
3047
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003048 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003049}
3050
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003051int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003052{
3053 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003054
Johan Hedberg35f74982014-02-18 17:14:32 +02003055 if (!bacmp(bdaddr, BDADDR_ANY)) {
3056 hci_blacklist_clear(hdev);
3057 return 0;
3058 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003059
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003060 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003061 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003062 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003063
3064 list_del(&entry->list);
3065 kfree(entry);
3066
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003067 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003068}
3069
Andre Guedes15819a72014-02-03 13:56:18 -03003070/* This function requires the caller holds hdev->lock */
3071struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3072 bdaddr_t *addr, u8 addr_type)
3073{
3074 struct hci_conn_params *params;
3075
3076 list_for_each_entry(params, &hdev->le_conn_params, list) {
3077 if (bacmp(&params->addr, addr) == 0 &&
3078 params->addr_type == addr_type) {
3079 return params;
3080 }
3081 }
3082
3083 return NULL;
3084}
3085
/* This function requires the caller holds hdev->lock */
/* Store (or update in place) the preferred LE connection interval range
 * for the given address. Note: the debug message is only emitted for
 * newly added entries, not for updates.
 */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* Existing entry: just refresh the interval range */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
3116
3117/* This function requires the caller holds hdev->lock */
3118void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3119{
3120 struct hci_conn_params *params;
3121
3122 params = hci_conn_params_lookup(hdev, addr, addr_type);
3123 if (!params)
3124 return;
3125
3126 list_del(&params->list);
3127 kfree(params);
3128
3129 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3130}
3131
3132/* This function requires the caller holds hdev->lock */
3133void hci_conn_params_clear(struct hci_dev *hdev)
3134{
3135 struct hci_conn_params *params, *tmp;
3136
3137 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3138 list_del(&params->list);
3139 kfree(params);
3140 }
3141
3142 BT_DBG("All LE connection parameters were removed");
3143}
3144
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003145static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003146{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003147 if (status) {
3148 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003149
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003150 hci_dev_lock(hdev);
3151 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3152 hci_dev_unlock(hdev);
3153 return;
3154 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003155}
3156
/* Completion callback for the LE-scan-disable request. For pure LE
 * discovery, scanning is done, so stop discovery. For interleaved
 * discovery, chain into the BR/EDR inquiry phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the BR/EDR phase with a fresh inquiry cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3199
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003200static void le_scan_disable_work(struct work_struct *work)
3201{
3202 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003203 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003204 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003205 struct hci_request req;
3206 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003207
3208 BT_DBG("%s", hdev->name);
3209
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003210 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003211
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003212 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003213 cp.enable = LE_SCAN_DISABLE;
3214 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003215
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003216 err = hci_req_run(&req, le_scan_disable_work_complete);
3217 if (err)
3218 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003219}
3220
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev with protocol defaults.
 *
 * Returns the new device with a single reference (released through
 * hci_free_dev()/put_device()), or NULL on allocation failure. The
 * caller is expected to set the transport callbacks and then call
 * hci_register_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types and link policy for a fresh controller */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff-mode interval defaults; presumably in baseband slots
	 * (0.625 ms units) like other HCI intervals — confirm against
	 * hci_conn sniff handling.
	 */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE scan and connection parameter defaults (controller units) */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists: keys, params, pending mgmt commands, etc. */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving the RX/TX/command paths */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3282
/* Free HCI device */
/* Drop the allocation reference taken by hci_alloc_dev(). The actual
 * memory is released by the device core via the dev release callback
 * once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3290
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291/* Register HCI device */
3292int hci_register_dev(struct hci_dev *hdev)
3293{
David Herrmannb1b813d2012-04-22 14:39:58 +02003294 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295
David Herrmann010666a2012-01-07 15:47:07 +01003296 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 return -EINVAL;
3298
Mat Martineau08add512011-11-02 16:18:36 -07003299 /* Do not allow HCI_AMP devices to register at index 0,
3300 * so the index can be used as the AMP controller ID.
3301 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003302 switch (hdev->dev_type) {
3303 case HCI_BREDR:
3304 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3305 break;
3306 case HCI_AMP:
3307 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3308 break;
3309 default:
3310 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003312
Sasha Levin3df92b32012-05-27 22:36:56 +02003313 if (id < 0)
3314 return id;
3315
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 sprintf(hdev->name, "hci%d", id);
3317 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003318
3319 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3320
Kees Cookd8537542013-07-03 15:04:57 -07003321 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3322 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003323 if (!hdev->workqueue) {
3324 error = -ENOMEM;
3325 goto err;
3326 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003327
Kees Cookd8537542013-07-03 15:04:57 -07003328 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3329 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003330 if (!hdev->req_workqueue) {
3331 destroy_workqueue(hdev->workqueue);
3332 error = -ENOMEM;
3333 goto err;
3334 }
3335
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003336 if (!IS_ERR_OR_NULL(bt_debugfs))
3337 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3338
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003339 dev_set_name(&hdev->dev, "%s", hdev->name);
3340
Johan Hedberg99780a72014-02-18 10:40:07 +02003341 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3342 CRYPTO_ALG_ASYNC);
3343 if (IS_ERR(hdev->tfm_aes)) {
3344 BT_ERR("Unable to create crypto context");
3345 error = PTR_ERR(hdev->tfm_aes);
3346 hdev->tfm_aes = NULL;
3347 goto err_wqueue;
3348 }
3349
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003350 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003351 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003352 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003354 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003355 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3356 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003357 if (hdev->rfkill) {
3358 if (rfkill_register(hdev->rfkill) < 0) {
3359 rfkill_destroy(hdev->rfkill);
3360 hdev->rfkill = NULL;
3361 }
3362 }
3363
Johan Hedberg5e130362013-09-13 08:58:17 +03003364 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3365 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3366
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003367 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003368 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003369
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003370 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003371 /* Assume BR/EDR support until proven otherwise (such as
3372 * through reading supported features during init.
3373 */
3374 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3375 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003376
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003377 write_lock(&hci_dev_list_lock);
3378 list_add(&hdev->list, &hci_dev_list);
3379 write_unlock(&hci_dev_list_lock);
3380
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003382 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
Johan Hedberg19202572013-01-14 22:33:51 +02003384 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003385
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003387
Johan Hedberg99780a72014-02-18 10:40:07 +02003388err_tfm:
3389 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003390err_wqueue:
3391 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003392 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003393err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003394 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003395
David Herrmann33ca9542011-10-08 14:58:49 +02003396 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397}
3398EXPORT_SYMBOL(hci_register_dev);
3399
/* Unregister HCI device */
/* Tear down a registered hci_dev in the reverse order of registration:
 * unlink from the global list, close the device, flush pending work,
 * notify mgmt, release rfkill/crypto/sysfs/debugfs/workqueues, clear all
 * stored keys and parameters, then drop the registration reference and
 * free the index. The ordering here is significant — do not reorder.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark early so concurrent paths see the device going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all persistent per-device data under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3465
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifier consumers. No state
 * is changed on hdev itself. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3473
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifier consumers. No state
 * is changed on hdev itself. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3481
Marcel Holtmann76bca882009-11-18 00:40:39 +01003482/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003483int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003484{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003485 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003486 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003487 kfree_skb(skb);
3488 return -ENXIO;
3489 }
3490
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003491 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003492 bt_cb(skb)->incoming = 1;
3493
3494 /* Time stamp */
3495 __net_timestamp(skb);
3496
Marcel Holtmann76bca882009-11-18 00:40:39 +01003497 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003498 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003499
Marcel Holtmann76bca882009-11-18 00:40:39 +01003500 return 0;
3501}
3502EXPORT_SYMBOL(hci_recv_frame);
3503
/* Incrementally reassemble one HCI packet of the given type from a byte
 * stream fed in arbitrary chunks.
 *
 * State lives in hdev->reassembly[index]: an skb whose control block
 * (bt_skb_cb) tracks how many bytes are still expected. The first call
 * allocates the skb sized for the packet type's maximum; later calls
 * append bytes until the header, then the full payload, is complete.
 * A completed packet is handed to hci_recv_frame().
 *
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for an
 * invalid type/index or -ENOMEM on allocation failure / oversized
 * payload (the partial skb is dropped in the latter case).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First chunk: size the buffer for the packet type and
		 * expect its header first. The switch is exhaustive for
		 * the types admitted by the range check above.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy at most what the current header/payload still needs */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is in, read the payload
		 * length from it and verify it fits the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3611
Marcel Holtmannef222012007-07-11 06:42:04 +02003612int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3613{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303614 int rem = 0;
3615
Marcel Holtmannef222012007-07-11 06:42:04 +02003616 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3617 return -EILSEQ;
3618
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003619 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003620 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303621 if (rem < 0)
3622 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003623
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303624 data += (count - rem);
3625 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003626 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003627
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303628 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003629}
3630EXPORT_SYMBOL(hci_recv_fragment);
3631
/* Dedicated reassembly slot for self-describing byte streams */
#define STREAM_REASSEMBLY 0

/* Feed bytes from a stream in which each packet is prefixed by its
 * one-byte HCI packet type (e.g. H4-style UART transports). The type
 * byte of a new packet is consumed here; the rest is delegated to
 * hci_reassembly() on the STREAM_REASSEMBLY slot.
 *
 * Returns the last reassembly remainder (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the type recorded in the skb */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3666
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667/* ---- Interface to upper protocols ---- */
3668
/* Add an upper-protocol callback set (L2CAP, SCO, ...) to the global
 * hci_cb_list under the list lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3680
/* Remove a previously registered upper-protocol callback set from the
 * global hci_cb_list under the list lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3692
/* Hand one outgoing frame to the transport driver, first duplicating it
 * to the monitor channel and (in promiscuous mode) to raw HCI sockets.
 * Send failures are logged but not propagated; the driver owns the skb
 * after hdev->send().
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3714
/* Initialize an on-stack HCI request: empty command queue, target
 * device, and a clear error state. Commands are added with
 * hci_req_add()/hci_req_add_ev() and submitted with hci_req_run().
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
3721
/* Submit a built request: tag the last queued command with the
 * completion callback, splice the request's commands onto the device
 * command queue and kick cmd_work.
 *
 * Returns 0 on success, the deferred build error if one occurred while
 * adding commands, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the final command's skb */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* cmd_q is also touched from interrupt context, hence _irqsave */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3753
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003754static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003755 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756{
3757 int len = HCI_COMMAND_HDR_SIZE + plen;
3758 struct hci_command_hdr *hdr;
3759 struct sk_buff *skb;
3760
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003762 if (!skb)
3763 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764
3765 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003766 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767 hdr->plen = plen;
3768
3769 if (plen)
3770 memcpy(skb_put(skb, plen), param, plen);
3771
3772 BT_DBG("skb len %d", skb->len);
3773
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003774 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003775
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003776 return skb;
3777}
3778
/* Send HCI command */
/* Build a single HCI command and queue it directly on the device
 * command queue (outside any hci_request). Returns 0 on success or
 * -ENOMEM if the command skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803
/* Queue a command to an asynchronous HCI request */
/* Append one command to a request under construction, optionally
 * recording the event code that signals its completion. An allocation
 * failure is latched in req->err and reported later by hci_req_run();
 * once set, further additions become no-ops.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3834
/* Convenience wrapper around hci_req_add_ev() for commands completed by
 * the ordinary Command Complete/Status events (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3840
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent to
 * the controller, or NULL if no command is outstanding or its opcode
 * does not match. The pointer aliases hdev->sent_cmd and stays valid
 * only as long as that skb does.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Opcode is stored little-endian on the wire */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3858
/* Send ACL data */
/* Prepend an ACL data header (handle+flags packed little-endian, then
 * the payload length) to an outgoing skb. Assumes the skb has headroom
 * for HCI_ACL_HDR_SIZE bytes.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3871
/* Add ACL headers to an outgoing skb (and any fragments hanging off its
 * frag_list) and queue everything on the given channel queue.
 *
 * BR/EDR uses the connection handle, AMP the channel handle. For a
 * fragmented skb the first fragment keeps the caller's flags while the
 * rest are re-marked as continuations; all fragments are queued under
 * the queue lock so they stay contiguous.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are sent
	 * as separate ACL packets below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Remaining fragments are ACL continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3929
/* Queue outgoing ACL data on the channel's data queue and schedule the
 * TX work that will actually push it to the controller.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Fill the header before skb_push() below changes skb->len:
	 * dlen must describe only the SCO payload.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Prepend the SCO header in front of the payload */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Transmission is deferred to hci_tx_work() */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961
3962/* ---- HCI TX task (outgoing data) ---- */
3963
3964/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003965static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3966 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967{
3968 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003969 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003970 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003972 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003974
3975 rcu_read_lock();
3976
3977 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003978 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003980
3981 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3982 continue;
3983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 num++;
3985
3986 if (c->sent < min) {
3987 min = c->sent;
3988 conn = c;
3989 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003990
3991 if (hci_conn_num(hdev, type) == num)
3992 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 }
3994
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003995 rcu_read_unlock();
3996
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003998 int cnt, q;
3999
4000 switch (conn->type) {
4001 case ACL_LINK:
4002 cnt = hdev->acl_cnt;
4003 break;
4004 case SCO_LINK:
4005 case ESCO_LINK:
4006 cnt = hdev->sco_cnt;
4007 break;
4008 case LE_LINK:
4009 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4010 break;
4011 default:
4012 cnt = 0;
4013 BT_ERR("Unknown link type");
4014 }
4015
4016 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017 *quote = q ? q : 1;
4018 } else
4019 *quote = 0;
4020
4021 BT_DBG("conn %p quote %d", conn, *quote);
4022 return conn;
4023}
4024
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004025static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026{
4027 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004028 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029
Ville Tervobae1f5d92011-02-10 22:38:53 -03004030 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004032 rcu_read_lock();
4033
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004035 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004036 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004037 BT_ERR("%s killing stalled connection %pMR",
4038 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004039 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040 }
4041 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004042
4043 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044}
4045
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004046static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4047 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004048{
4049 struct hci_conn_hash *h = &hdev->conn_hash;
4050 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004051 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004052 struct hci_conn *conn;
4053 int cnt, q, conn_num = 0;
4054
4055 BT_DBG("%s", hdev->name);
4056
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004057 rcu_read_lock();
4058
4059 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004060 struct hci_chan *tmp;
4061
4062 if (conn->type != type)
4063 continue;
4064
4065 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4066 continue;
4067
4068 conn_num++;
4069
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004070 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004071 struct sk_buff *skb;
4072
4073 if (skb_queue_empty(&tmp->data_q))
4074 continue;
4075
4076 skb = skb_peek(&tmp->data_q);
4077 if (skb->priority < cur_prio)
4078 continue;
4079
4080 if (skb->priority > cur_prio) {
4081 num = 0;
4082 min = ~0;
4083 cur_prio = skb->priority;
4084 }
4085
4086 num++;
4087
4088 if (conn->sent < min) {
4089 min = conn->sent;
4090 chan = tmp;
4091 }
4092 }
4093
4094 if (hci_conn_num(hdev, type) == conn_num)
4095 break;
4096 }
4097
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004098 rcu_read_unlock();
4099
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004100 if (!chan)
4101 return NULL;
4102
4103 switch (chan->conn->type) {
4104 case ACL_LINK:
4105 cnt = hdev->acl_cnt;
4106 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004107 case AMP_LINK:
4108 cnt = hdev->block_cnt;
4109 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004110 case SCO_LINK:
4111 case ESCO_LINK:
4112 cnt = hdev->sco_cnt;
4113 break;
4114 case LE_LINK:
4115 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4116 break;
4117 default:
4118 cnt = 0;
4119 BT_ERR("Unknown link type");
4120 }
4121
4122 q = cnt / num;
4123 *quote = q ? q : 1;
4124 BT_DBG("chan %p quote %d", chan, *quote);
4125 return chan;
4126}
4127
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004128static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4129{
4130 struct hci_conn_hash *h = &hdev->conn_hash;
4131 struct hci_conn *conn;
4132 int num = 0;
4133
4134 BT_DBG("%s", hdev->name);
4135
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004136 rcu_read_lock();
4137
4138 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004139 struct hci_chan *chan;
4140
4141 if (conn->type != type)
4142 continue;
4143
4144 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4145 continue;
4146
4147 num++;
4148
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004149 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004150 struct sk_buff *skb;
4151
4152 if (chan->sent) {
4153 chan->sent = 0;
4154 continue;
4155 }
4156
4157 if (skb_queue_empty(&chan->data_q))
4158 continue;
4159
4160 skb = skb_peek(&chan->data_q);
4161 if (skb->priority >= HCI_PRIO_MAX - 1)
4162 continue;
4163
4164 skb->priority = HCI_PRIO_MAX - 1;
4165
4166 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004167 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004168 }
4169
4170 if (hci_conn_num(hdev, type) == num)
4171 break;
4172 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004173
4174 rcu_read_unlock();
4175
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004176}
4177
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004178static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4179{
4180 /* Calculate count of blocks used by this packet */
4181 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4182}
4183
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004184static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 if (!test_bit(HCI_RAW, &hdev->flags)) {
4187 /* ACL tx timeout must be longer than maximum
4188 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004189 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004190 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004191 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004193}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004195static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004196{
4197 unsigned int cnt = hdev->acl_cnt;
4198 struct hci_chan *chan;
4199 struct sk_buff *skb;
4200 int quote;
4201
4202 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004203
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004204 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004205 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004206 u32 priority = (skb_peek(&chan->data_q))->priority;
4207 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004208 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004209 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004210
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004211 /* Stop if priority has changed */
4212 if (skb->priority < priority)
4213 break;
4214
4215 skb = skb_dequeue(&chan->data_q);
4216
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004217 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004218 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004219
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004220 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 hdev->acl_last_tx = jiffies;
4222
4223 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004224 chan->sent++;
4225 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 }
4227 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004228
4229 if (cnt != hdev->acl_cnt)
4230 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231}
4232
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004233static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004234{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004235 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004236 struct hci_chan *chan;
4237 struct sk_buff *skb;
4238 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004239 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004240
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004241 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004242
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004243 BT_DBG("%s", hdev->name);
4244
4245 if (hdev->dev_type == HCI_AMP)
4246 type = AMP_LINK;
4247 else
4248 type = ACL_LINK;
4249
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004250 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004251 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004252 u32 priority = (skb_peek(&chan->data_q))->priority;
4253 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4254 int blocks;
4255
4256 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004257 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004258
4259 /* Stop if priority has changed */
4260 if (skb->priority < priority)
4261 break;
4262
4263 skb = skb_dequeue(&chan->data_q);
4264
4265 blocks = __get_blocks(hdev, skb);
4266 if (blocks > hdev->block_cnt)
4267 return;
4268
4269 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004270 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004271
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004272 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004273 hdev->acl_last_tx = jiffies;
4274
4275 hdev->block_cnt -= blocks;
4276 quote -= blocks;
4277
4278 chan->sent += blocks;
4279 chan->conn->sent += blocks;
4280 }
4281 }
4282
4283 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004284 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004285}
4286
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004287static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004288{
4289 BT_DBG("%s", hdev->name);
4290
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004291 /* No ACL link over BR/EDR controller */
4292 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4293 return;
4294
4295 /* No AMP link over AMP controller */
4296 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004297 return;
4298
4299 switch (hdev->flow_ctl_mode) {
4300 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4301 hci_sched_acl_pkt(hdev);
4302 break;
4303
4304 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4305 hci_sched_acl_blk(hdev);
4306 break;
4307 }
4308}
4309
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004311static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312{
4313 struct hci_conn *conn;
4314 struct sk_buff *skb;
4315 int quote;
4316
4317 BT_DBG("%s", hdev->name);
4318
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004319 if (!hci_conn_num(hdev, SCO_LINK))
4320 return;
4321
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4323 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4324 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004325 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326
4327 conn->sent++;
4328 if (conn->sent == ~0)
4329 conn->sent = 0;
4330 }
4331 }
4332}
4333
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004334static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004335{
4336 struct hci_conn *conn;
4337 struct sk_buff *skb;
4338 int quote;
4339
4340 BT_DBG("%s", hdev->name);
4341
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004342 if (!hci_conn_num(hdev, ESCO_LINK))
4343 return;
4344
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004345 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4346 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004347 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4348 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004349 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004350
4351 conn->sent++;
4352 if (conn->sent == ~0)
4353 conn->sent = 0;
4354 }
4355 }
4356}
4357
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004358static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004359{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004360 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004361 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004362 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004363
4364 BT_DBG("%s", hdev->name);
4365
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004366 if (!hci_conn_num(hdev, LE_LINK))
4367 return;
4368
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004369 if (!test_bit(HCI_RAW, &hdev->flags)) {
4370 /* LE tx timeout must be longer than maximum
4371 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004372 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004373 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004374 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004375 }
4376
4377 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004378 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004379 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004380 u32 priority = (skb_peek(&chan->data_q))->priority;
4381 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004382 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004383 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004384
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004385 /* Stop if priority has changed */
4386 if (skb->priority < priority)
4387 break;
4388
4389 skb = skb_dequeue(&chan->data_q);
4390
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004391 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004392 hdev->le_last_tx = jiffies;
4393
4394 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004395 chan->sent++;
4396 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004397 }
4398 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004399
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004400 if (hdev->le_pkts)
4401 hdev->le_cnt = cnt;
4402 else
4403 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004404
4405 if (cnt != tmp)
4406 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004407}
4408
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004409static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004411 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412 struct sk_buff *skb;
4413
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004414 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004415 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416
Marcel Holtmann52de5992013-09-03 18:08:38 -07004417 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4418 /* Schedule queues and send stuff to HCI driver */
4419 hci_sched_acl(hdev);
4420 hci_sched_sco(hdev);
4421 hci_sched_esco(hdev);
4422 hci_sched_le(hdev);
4423 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004424
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 /* Send next queued raw (unknown type) packet */
4426 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004427 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428}
4429
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004430/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431
4432/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004433static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434{
4435 struct hci_acl_hdr *hdr = (void *) skb->data;
4436 struct hci_conn *conn;
4437 __u16 handle, flags;
4438
4439 skb_pull(skb, HCI_ACL_HDR_SIZE);
4440
4441 handle = __le16_to_cpu(hdr->handle);
4442 flags = hci_flags(handle);
4443 handle = hci_handle(handle);
4444
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004445 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004446 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447
4448 hdev->stat.acl_rx++;
4449
4450 hci_dev_lock(hdev);
4451 conn = hci_conn_hash_lookup_handle(hdev, handle);
4452 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004453
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004455 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004456
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004458 l2cap_recv_acldata(conn, skb, flags);
4459 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004461 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004462 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463 }
4464
4465 kfree_skb(skb);
4466}
4467
4468/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004469static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470{
4471 struct hci_sco_hdr *hdr = (void *) skb->data;
4472 struct hci_conn *conn;
4473 __u16 handle;
4474
4475 skb_pull(skb, HCI_SCO_HDR_SIZE);
4476
4477 handle = __le16_to_cpu(hdr->handle);
4478
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004479 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480
4481 hdev->stat.sco_rx++;
4482
4483 hci_dev_lock(hdev);
4484 conn = hci_conn_hash_lookup_handle(hdev, handle);
4485 hci_dev_unlock(hdev);
4486
4487 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004489 sco_recv_scodata(conn, skb);
4490 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004492 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004493 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494 }
4495
4496 kfree_skb(skb);
4497}
4498
Johan Hedberg9238f362013-03-05 20:37:48 +02004499static bool hci_req_is_complete(struct hci_dev *hdev)
4500{
4501 struct sk_buff *skb;
4502
4503 skb = skb_peek(&hdev->cmd_q);
4504 if (!skb)
4505 return true;
4506
4507 return bt_cb(skb)->req.start;
4508}
4509
Johan Hedberg42c6b122013-03-05 20:37:49 +02004510static void hci_resend_last(struct hci_dev *hdev)
4511{
4512 struct hci_command_hdr *sent;
4513 struct sk_buff *skb;
4514 u16 opcode;
4515
4516 if (!hdev->sent_cmd)
4517 return;
4518
4519 sent = (void *) hdev->sent_cmd->data;
4520 opcode = __le16_to_cpu(sent->opcode);
4521 if (opcode == HCI_OP_RESET)
4522 return;
4523
4524 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4525 if (!skb)
4526 return;
4527
4528 skb_queue_head(&hdev->cmd_q, skb);
4529 queue_work(hdev->workqueue, &hdev->cmd_work);
4530}
4531
Johan Hedberg9238f362013-03-05 20:37:48 +02004532void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4533{
4534 hci_req_complete_t req_complete = NULL;
4535 struct sk_buff *skb;
4536 unsigned long flags;
4537
4538 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4539
Johan Hedberg42c6b122013-03-05 20:37:49 +02004540 /* If the completed command doesn't match the last one that was
4541 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004542 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004543 if (!hci_sent_cmd_data(hdev, opcode)) {
4544 /* Some CSR based controllers generate a spontaneous
4545 * reset complete event during init and any pending
4546 * command will never be completed. In such a case we
4547 * need to resend whatever was the last sent
4548 * command.
4549 */
4550 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4551 hci_resend_last(hdev);
4552
Johan Hedberg9238f362013-03-05 20:37:48 +02004553 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004554 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004555
4556 /* If the command succeeded and there's still more commands in
4557 * this request the request is not yet complete.
4558 */
4559 if (!status && !hci_req_is_complete(hdev))
4560 return;
4561
4562 /* If this was the last command in a request the complete
4563 * callback would be found in hdev->sent_cmd instead of the
4564 * command queue (hdev->cmd_q).
4565 */
4566 if (hdev->sent_cmd) {
4567 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004568
4569 if (req_complete) {
4570 /* We must set the complete callback to NULL to
4571 * avoid calling the callback more than once if
4572 * this function gets called again.
4573 */
4574 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4575
Johan Hedberg9238f362013-03-05 20:37:48 +02004576 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004577 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004578 }
4579
4580 /* Remove all pending commands belonging to this request */
4581 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4582 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4583 if (bt_cb(skb)->req.start) {
4584 __skb_queue_head(&hdev->cmd_q, skb);
4585 break;
4586 }
4587
4588 req_complete = bt_cb(skb)->req.complete;
4589 kfree_skb(skb);
4590 }
4591 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4592
4593call_complete:
4594 if (req_complete)
4595 req_complete(hdev, status);
4596}
4597
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004598static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004600 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 struct sk_buff *skb;
4602
4603 BT_DBG("%s", hdev->name);
4604
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004606 /* Send copy to monitor */
4607 hci_send_to_monitor(hdev, skb);
4608
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 if (atomic_read(&hdev->promisc)) {
4610 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004611 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 }
4613
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004614 if (test_bit(HCI_RAW, &hdev->flags) ||
4615 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 kfree_skb(skb);
4617 continue;
4618 }
4619
4620 if (test_bit(HCI_INIT, &hdev->flags)) {
4621 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004622 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 case HCI_ACLDATA_PKT:
4624 case HCI_SCODATA_PKT:
4625 kfree_skb(skb);
4626 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004627 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 }
4629
4630 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004631 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004633 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634 hci_event_packet(hdev, skb);
4635 break;
4636
4637 case HCI_ACLDATA_PKT:
4638 BT_DBG("%s ACL data packet", hdev->name);
4639 hci_acldata_packet(hdev, skb);
4640 break;
4641
4642 case HCI_SCODATA_PKT:
4643 BT_DBG("%s SCO data packet", hdev->name);
4644 hci_scodata_packet(hdev, skb);
4645 break;
4646
4647 default:
4648 kfree_skb(skb);
4649 break;
4650 }
4651 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652}
4653
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004654static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004656 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657 struct sk_buff *skb;
4658
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004659 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4660 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004663 if (atomic_read(&hdev->cmd_cnt)) {
4664 skb = skb_dequeue(&hdev->cmd_q);
4665 if (!skb)
4666 return;
4667
Wei Yongjun7585b972009-02-25 18:29:52 +08004668 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004670 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004671 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004673 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004674 if (test_bit(HCI_RESET, &hdev->flags))
4675 del_timer(&hdev->cmd_timer);
4676 else
4677 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004678 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679 } else {
4680 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004681 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 }
4683 }
4684}