/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

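/* Boolean debugfs entries such as dut_mode below follow the usual kernel
 * pattern: reads return a single 'Y' or 'N' character plus a newline via
 * simple_read_from_buffer(), and writes are parsed with strtobool().
 */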
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

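/* The integer attributes below rely on the standard debugfs helper
 * DEFINE_SIMPLE_ATTRIBUTE, which builds the file_operations around a pair
 * of u64 get/set callbacks and a printf-style output format.
 */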
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

/* ---- HCI requests ---- */

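/* Synchronous requests are serialized through hci_req_lock() and block on
 * hdev->req_wait_q until hci_req_sync_complete() or hci_req_cancel()
 * updates hdev->req_status, or the timeout expires.
 */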
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

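/* __hci_cmd_sync() and __hci_cmd_sync_ev() return the matching event skb
 * on success and an ERR_PTR() on failure, so callers check the result with
 * IS_ERR()/PTR_ERR() and release the skb with kfree_skb(), as
 * dut_mode_write() above does.
 */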
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

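/* Pick the best supported inquiry result format: 0x02 for inquiry with
 * extended results, 0x01 for inquiry with RSSI (including a few known
 * controllers that deliver RSSI results despite their feature bits), and
 * 0x00 for the standard format.
 */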
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

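/* Build the event mask from the base page defaults, then widen it
 * according to the controller's LMP feature bits; LE capable controllers
 * additionally get the LE event mask programmed via
 * HCI_OP_LE_SET_EVENT_MASK.
 */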
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

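/* Controller bring-up runs the staged init requests in order: stage 1
 * resets and identifies the controller, while stages 2-4 only run for
 * HCI_BREDR devices and program transport setup, feature-dependent
 * commands and the second event mask page. The debugfs entries are
 * created once, while the HCI_SETUP flag is still set.
 */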
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

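/* Discovery state transitions are reported to the management interface:
 * entering DISCOVERY_FINDING emits mgmt_discovering(hdev, 1), and moving
 * to DISCOVERY_STOPPED emits mgmt_discovering(hdev, 0) unless discovery
 * never got past DISCOVERY_STARTING.
 */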
Johan Hedbergff9ef572012-01-04 14:23:45 +02001492void hci_discovery_set_state(struct hci_dev *hdev, int state)
1493{
1494 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1495
1496 if (hdev->discovery.state == state)
1497 return;
1498
1499 switch (state) {
1500 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001501 if (hdev->discovery.state != DISCOVERY_STARTING)
1502 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001503 break;
1504 case DISCOVERY_STARTING:
1505 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001506 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001507 mgmt_discovering(hdev, 1);
1508 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001509 case DISCOVERY_RESOLVING:
1510 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001511 case DISCOVERY_STOPPING:
1512 break;
1513 }
1514
1515 hdev->discovery.state = state;
1516}
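/* Illustrative sketch (assumed typical lifecycle, not from this file):
 * a discovery cycle normally walks STOPPED -> STARTING -> FINDING
 * [-> RESOLVING] -> STOPPING -> STOPPED, with each transition reported
 * through this helper, e.g.:
 *
 *	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
 *	... start inquiry and/or LE scan ...
 *	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
 */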
1517
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001518void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519{
Johan Hedberg30883512012-01-04 14:16:21 +02001520 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001521 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Johan Hedberg561aafb2012-01-04 13:31:59 +02001523 list_for_each_entry_safe(p, n, &cache->all, all) {
1524 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001525 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001527
1528 INIT_LIST_HEAD(&cache->unknown);
1529 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530}
1531
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001532struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1533 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534{
Johan Hedberg30883512012-01-04 14:16:21 +02001535 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 struct inquiry_entry *e;
1537
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001538 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539
Johan Hedberg561aafb2012-01-04 13:31:59 +02001540 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001542 return e;
1543 }
1544
1545 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
Johan Hedberg561aafb2012-01-04 13:31:59 +02001548struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001549 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001550{
Johan Hedberg30883512012-01-04 14:16:21 +02001551 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001552 struct inquiry_entry *e;
1553
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001554 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001555
1556 list_for_each_entry(e, &cache->unknown, list) {
1557 if (!bacmp(&e->data.bdaddr, bdaddr))
1558 return e;
1559 }
1560
1561 return NULL;
1562}
1563
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001564struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001565 bdaddr_t *bdaddr,
1566 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001567{
1568 struct discovery_state *cache = &hdev->discovery;
1569 struct inquiry_entry *e;
1570
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001571 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001572
 1573	list_for_each_entry(e, &cache->resolve, list) {
 	/* BDADDR_ANY acts as a wildcard: match on name state alone */
 1574		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1575 return e;
1576 if (!bacmp(&e->data.bdaddr, bdaddr))
1577 return e;
1578 }
1579
1580 return NULL;
1581}
1582
Johan Hedberga3d4e202012-01-09 00:53:02 +02001583void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001584 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001585{
1586 struct discovery_state *cache = &hdev->discovery;
1587 struct list_head *pos = &cache->resolve;
1588 struct inquiry_entry *p;
1589
1590 list_del(&ie->list);
1591
1592 list_for_each_entry(p, &cache->resolve, list) {
1593 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001594 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001595 break;
1596 pos = &p->list;
1597 }
1598
1599 list_add(&ie->list, pos);
1600}
1601
Johan Hedberg31754052012-01-04 13:39:52 +02001602bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001603 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604{
Johan Hedberg30883512012-01-04 14:16:21 +02001605 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001606 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001608 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
Szymon Janc2b2fec42012-11-20 11:38:54 +01001610 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1611
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001612 if (ssp)
1613 *ssp = data->ssp_mode;
1614
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001615 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001616 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001617 if (ie->data.ssp_mode && ssp)
1618 *ssp = true;
1619
Johan Hedberga3d4e202012-01-09 00:53:02 +02001620 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001621 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001622 ie->data.rssi = data->rssi;
1623 hci_inquiry_cache_update_resolve(hdev, ie);
1624 }
1625
Johan Hedberg561aafb2012-01-04 13:31:59 +02001626 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001627 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001628
Johan Hedberg561aafb2012-01-04 13:31:59 +02001629 /* Entry not in the cache. Add new one. */
1630 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1631 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001632 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001633
1634 list_add(&ie->all, &cache->all);
1635
1636 if (name_known) {
1637 ie->name_state = NAME_KNOWN;
1638 } else {
1639 ie->name_state = NAME_NOT_KNOWN;
1640 list_add(&ie->list, &cache->unknown);
1641 }
1642
1643update:
1644 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001645 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001646 ie->name_state = NAME_KNOWN;
1647 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 }
1649
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001650 memcpy(&ie->data, data, sizeof(*data));
1651 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001653
1654 if (ie->name_state == NAME_NOT_KNOWN)
1655 return false;
1656
1657 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658}
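/* Illustrative sketch of a hypothetical caller (an inquiry result
 * event handler is assumed): fill a struct inquiry_data and update
 * the cache under the device lock:
 *
 *	struct inquiry_data data;
 *	bool name_known, ssp;
 *
 *	bacpy(&data.bdaddr, &info->bdaddr);
 *	data.rssi = info->rssi;
 *	hci_dev_lock(hdev);
 *	name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
 *	hci_dev_unlock(hdev);
 */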
1659
1660static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1661{
Johan Hedberg30883512012-01-04 14:16:21 +02001662 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 struct inquiry_info *info = (struct inquiry_info *) buf;
1664 struct inquiry_entry *e;
1665 int copied = 0;
1666
Johan Hedberg561aafb2012-01-04 13:31:59 +02001667 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001669
1670 if (copied >= num)
1671 break;
1672
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 bacpy(&info->bdaddr, &data->bdaddr);
1674 info->pscan_rep_mode = data->pscan_rep_mode;
1675 info->pscan_period_mode = data->pscan_period_mode;
1676 info->pscan_mode = data->pscan_mode;
1677 memcpy(info->dev_class, data->dev_class, 3);
1678 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001681 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 }
1683
1684 BT_DBG("cache %p, copied %d", cache, copied);
1685 return copied;
1686}
1687
Johan Hedberg42c6b122013-03-05 20:37:49 +02001688static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689{
1690 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001691 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 struct hci_cp_inquiry cp;
1693
1694 BT_DBG("%s", hdev->name);
1695
1696 if (test_bit(HCI_INQUIRY, &hdev->flags))
1697 return;
1698
1699 /* Start Inquiry */
1700 memcpy(&cp.lap, &ir->lap, 3);
1701 cp.length = ir->length;
1702 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001703 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704}
1705
/* wait_on_bit() action: reschedule and abort if a signal is pending */
Andre Guedes3e13fa12013-03-27 20:04:56 -03001706static int wait_inquiry(void *word)
1707{
1708 schedule();
1709 return signal_pending(current);
1710}
1711
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712int hci_inquiry(void __user *arg)
1713{
1714 __u8 __user *ptr = arg;
1715 struct hci_inquiry_req ir;
1716 struct hci_dev *hdev;
1717 int err = 0, do_inquiry = 0, max_rsp;
1718 long timeo;
1719 __u8 *buf;
1720
1721 if (copy_from_user(&ir, ptr, sizeof(ir)))
1722 return -EFAULT;
1723
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001724 hdev = hci_dev_get(ir.dev_id);
1725 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 return -ENODEV;
1727
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001728 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1729 err = -EBUSY;
1730 goto done;
1731 }
1732
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001733 if (hdev->dev_type != HCI_BREDR) {
1734 err = -EOPNOTSUPP;
1735 goto done;
1736 }
1737
Johan Hedberg56f87902013-10-02 13:43:13 +03001738 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1739 err = -EOPNOTSUPP;
1740 goto done;
1741 }
1742
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001743 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001744 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001745 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001746 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 do_inquiry = 1;
1748 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001749 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
Marcel Holtmann04837f62006-07-03 10:02:33 +02001751 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001752
1753 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001754 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1755 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001756 if (err < 0)
1757 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001758
1759 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1760 * cleared). If it is interrupted by a signal, return -EINTR.
1761 */
1762 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1763 TASK_INTERRUPTIBLE))
1764 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001765 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001767	/* For an unlimited number of responses, use a buffer with
 1768	 * 255 entries.
 1769	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1771
 1772	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
 1773	 * and copy it to user space from there.
 1774	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001775 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001776 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 err = -ENOMEM;
1778 goto done;
1779 }
1780
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001781 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001783 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
1785 BT_DBG("num_rsp %d", ir.num_rsp);
1786
1787 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1788 ptr += sizeof(ir);
1789 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001790 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001792	} else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793		err = -EFAULT;
 	}
1794
1795 kfree(buf);
1796
1797done:
1798 hci_dev_put(hdev);
1799 return err;
1800}
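/* Illustrative sketch of a hypothetical user-space caller of the
 * HCIINQUIRY ioctl served by this function; the response buffer must
 * directly follow the request header, as the copy_to_user() calls
 * above assume:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */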
1801
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001802static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 int ret = 0;
1805
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 BT_DBG("%s %p", hdev->name, hdev);
1807
1808 hci_req_lock(hdev);
1809
Johan Hovold94324962012-03-15 14:48:41 +01001810 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1811 ret = -ENODEV;
1812 goto done;
1813 }
1814
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001815 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1816 /* Check for rfkill but allow the HCI setup stage to
1817 * proceed (which in itself doesn't cause any RF activity).
1818 */
1819 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1820 ret = -ERFKILL;
1821 goto done;
1822 }
1823
1824 /* Check for valid public address or a configured static
 1825	 * random address, but let the HCI setup proceed to
1826 * be able to determine if there is a public address
1827 * or not.
1828 *
1829 * This check is only valid for BR/EDR controllers
1830 * since AMP controllers do not have an address.
1831 */
1832 if (hdev->dev_type == HCI_BREDR &&
1833 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1834 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1835 ret = -EADDRNOTAVAIL;
1836 goto done;
1837 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001838 }
1839
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 if (test_bit(HCI_UP, &hdev->flags)) {
1841 ret = -EALREADY;
1842 goto done;
1843 }
1844
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 if (hdev->open(hdev)) {
1846 ret = -EIO;
1847 goto done;
1848 }
1849
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001850 atomic_set(&hdev->cmd_cnt, 1);
1851 set_bit(HCI_INIT, &hdev->flags);
1852
1853 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1854 ret = hdev->setup(hdev);
1855
1856 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001857 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1858 set_bit(HCI_RAW, &hdev->flags);
1859
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001860 if (!test_bit(HCI_RAW, &hdev->flags) &&
1861 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001862 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 }
1864
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001865 clear_bit(HCI_INIT, &hdev->flags);
1866
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 if (!ret) {
1868 hci_dev_hold(hdev);
1869 set_bit(HCI_UP, &hdev->flags);
1870 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001871 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001872 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001873 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001874 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001875 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001876 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001877 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001878 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001880 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001881 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001882 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883
1884 skb_queue_purge(&hdev->cmd_q);
1885 skb_queue_purge(&hdev->rx_q);
1886
1887 if (hdev->flush)
1888 hdev->flush(hdev);
1889
1890 if (hdev->sent_cmd) {
1891 kfree_skb(hdev->sent_cmd);
1892 hdev->sent_cmd = NULL;
1893 }
1894
1895 hdev->close(hdev);
1896 hdev->flags = 0;
1897 }
1898
1899done:
1900 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 return ret;
1902}
1903
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001904/* ---- HCI ioctl helpers ---- */
1905
1906int hci_dev_open(__u16 dev)
1907{
1908 struct hci_dev *hdev;
1909 int err;
1910
1911 hdev = hci_dev_get(dev);
1912 if (!hdev)
1913 return -ENODEV;
1914
Johan Hedberge1d08f42013-10-01 22:44:50 +03001915 /* We need to ensure that no other power on/off work is pending
1916 * before proceeding to call hci_dev_do_open. This is
1917 * particularly important if the setup procedure has not yet
1918 * completed.
1919 */
1920 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1921 cancel_delayed_work(&hdev->power_off);
1922
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001923 /* After this call it is guaranteed that the setup procedure
1924 * has finished. This means that error conditions like RFKILL
1925 * or no valid public or static random address apply.
1926 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001927 flush_workqueue(hdev->req_workqueue);
1928
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001929 err = hci_dev_do_open(hdev);
1930
1931 hci_dev_put(hdev);
1932
1933 return err;
1934}
1935
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936static int hci_dev_do_close(struct hci_dev *hdev)
1937{
1938 BT_DBG("%s %p", hdev->name, hdev);
1939
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001940 cancel_delayed_work(&hdev->power_off);
1941
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 hci_req_cancel(hdev, ENODEV);
1943 hci_req_lock(hdev);
1944
1945 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001946 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 hci_req_unlock(hdev);
1948 return 0;
1949 }
1950
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001951	/* Flush RX and TX work items */
1952 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001953 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001955 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001956 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001957 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001958 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001959 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001960 }
1961
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001962 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001963 cancel_delayed_work(&hdev->service_cache);
1964
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001965 cancel_delayed_work_sync(&hdev->le_scan_disable);
1966
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001967 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001968 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001970 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
1972 hci_notify(hdev, HCI_DEV_DOWN);
1973
1974 if (hdev->flush)
1975 hdev->flush(hdev);
1976
1977 /* Reset device */
1978 skb_queue_purge(&hdev->cmd_q);
1979 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001980 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001981 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001982 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001984 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 clear_bit(HCI_INIT, &hdev->flags);
1986 }
1987
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001988 /* flush cmd work */
1989 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
1991 /* Drop queues */
1992 skb_queue_purge(&hdev->rx_q);
1993 skb_queue_purge(&hdev->cmd_q);
1994 skb_queue_purge(&hdev->raw_q);
1995
1996 /* Drop last sent command */
1997 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001998 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 kfree_skb(hdev->sent_cmd);
2000 hdev->sent_cmd = NULL;
2001 }
2002
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002003 kfree_skb(hdev->recv_evt);
2004 hdev->recv_evt = NULL;
2005
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 /* After this point our queues are empty
2007 * and no tasks are scheduled. */
2008 hdev->close(hdev);
2009
Johan Hedberg35b973c2013-03-15 17:06:59 -05002010 /* Clear flags */
2011 hdev->flags = 0;
2012 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2013
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002014 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2015 if (hdev->dev_type == HCI_BREDR) {
2016 hci_dev_lock(hdev);
2017 mgmt_powered(hdev, 0);
2018 hci_dev_unlock(hdev);
2019 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002020 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002021
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002022 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002023 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002024
Johan Hedberge59fda82012-02-22 18:11:53 +02002025 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002026 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 hci_req_unlock(hdev);
2029
2030 hci_dev_put(hdev);
2031 return 0;
2032}
2033
2034int hci_dev_close(__u16 dev)
2035{
2036 struct hci_dev *hdev;
2037 int err;
2038
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002039 hdev = hci_dev_get(dev);
2040 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002042
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002043 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2044 err = -EBUSY;
2045 goto done;
2046 }
2047
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002048 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2049 cancel_delayed_work(&hdev->power_off);
2050
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002052
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002053done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 hci_dev_put(hdev);
2055 return err;
2056}
2057
2058int hci_dev_reset(__u16 dev)
2059{
2060 struct hci_dev *hdev;
2061 int ret = 0;
2062
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002063 hdev = hci_dev_get(dev);
2064 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 return -ENODEV;
2066
2067 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Marcel Holtmann808a0492013-08-26 20:57:58 -07002069 if (!test_bit(HCI_UP, &hdev->flags)) {
2070 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002072 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002074 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2075 ret = -EBUSY;
2076 goto done;
2077 }
2078
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 /* Drop queues */
2080 skb_queue_purge(&hdev->rx_q);
2081 skb_queue_purge(&hdev->cmd_q);
2082
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002083 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002084 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002086 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
2088 if (hdev->flush)
2089 hdev->flush(hdev);
2090
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002091 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002092	hdev->acl_cnt = 0;
 	hdev->sco_cnt = 0;
 	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
2094 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002095 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
2097done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 hci_req_unlock(hdev);
2099 hci_dev_put(hdev);
2100 return ret;
2101}
2102
2103int hci_dev_reset_stat(__u16 dev)
2104{
2105 struct hci_dev *hdev;
2106 int ret = 0;
2107
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002108 hdev = hci_dev_get(dev);
2109 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 return -ENODEV;
2111
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002112 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2113 ret = -EBUSY;
2114 goto done;
2115 }
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2118
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002119done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 return ret;
2122}
2123
2124int hci_dev_cmd(unsigned int cmd, void __user *arg)
2125{
2126 struct hci_dev *hdev;
2127 struct hci_dev_req dr;
2128 int err = 0;
2129
2130 if (copy_from_user(&dr, arg, sizeof(dr)))
2131 return -EFAULT;
2132
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002133 hdev = hci_dev_get(dr.dev_id);
2134 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 return -ENODEV;
2136
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002137 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2138 err = -EBUSY;
2139 goto done;
2140 }
2141
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002142 if (hdev->dev_type != HCI_BREDR) {
2143 err = -EOPNOTSUPP;
2144 goto done;
2145 }
2146
Johan Hedberg56f87902013-10-02 13:43:13 +03002147 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2148 err = -EOPNOTSUPP;
2149 goto done;
2150 }
2151
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 switch (cmd) {
2153 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002154 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2155 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 break;
2157
2158 case HCISETENCRYPT:
2159 if (!lmp_encrypt_capable(hdev)) {
2160 err = -EOPNOTSUPP;
2161 break;
2162 }
2163
2164 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2165 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002166 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2167 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 if (err)
2169 break;
2170 }
2171
Johan Hedberg01178cd2013-03-05 20:37:41 +02002172 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2173 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 break;
2175
2176 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002177 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2178 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 break;
2180
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002181 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002182 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2183 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002184 break;
2185
2186 case HCISETLINKMODE:
2187 hdev->link_mode = ((__u16) dr.dev_opt) &
2188 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2189 break;
2190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 case HCISETPTYPE:
2192 hdev->pkt_type = (__u16) dr.dev_opt;
2193 break;
2194
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002196 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2197 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 break;
2199
2200 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002201 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2202 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 break;
2204
2205 default:
2206 err = -EINVAL;
2207 break;
2208 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002209
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002210done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 hci_dev_put(hdev);
2212 return err;
2213}
2214
2215int hci_get_dev_list(void __user *arg)
2216{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002217 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 struct hci_dev_list_req *dl;
2219 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 int n = 0, size, err;
2221 __u16 dev_num;
2222
2223 if (get_user(dev_num, (__u16 __user *) arg))
2224 return -EFAULT;
2225
2226 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2227 return -EINVAL;
2228
2229 size = sizeof(*dl) + dev_num * sizeof(*dr);
2230
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002231 dl = kzalloc(size, GFP_KERNEL);
2232 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 return -ENOMEM;
2234
2235 dr = dl->dev_req;
2236
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002237 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002238 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002239 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002240 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002241
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002242 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2243 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002244
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 (dr + n)->dev_id = hdev->id;
2246 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 if (++n >= dev_num)
2249 break;
2250 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002251 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252
2253 dl->dev_num = n;
2254 size = sizeof(*dl) + n * sizeof(*dr);
2255
2256 err = copy_to_user(arg, dl, size);
2257 kfree(dl);
2258
2259 return err ? -EFAULT : 0;
2260}
2261
2262int hci_get_dev_info(void __user *arg)
2263{
2264 struct hci_dev *hdev;
2265 struct hci_dev_info di;
2266 int err = 0;
2267
2268 if (copy_from_user(&di, arg, sizeof(di)))
2269 return -EFAULT;
2270
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002271 hdev = hci_dev_get(di.dev_id);
2272 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 return -ENODEV;
2274
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002275 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002276 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002277
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002278 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2279 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002280
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 strcpy(di.name, hdev->name);
2282 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002283 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 di.flags = hdev->flags;
2285 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002286 if (lmp_bredr_capable(hdev)) {
2287 di.acl_mtu = hdev->acl_mtu;
2288 di.acl_pkts = hdev->acl_pkts;
2289 di.sco_mtu = hdev->sco_mtu;
2290 di.sco_pkts = hdev->sco_pkts;
2291 } else {
2292 di.acl_mtu = hdev->le_mtu;
2293 di.acl_pkts = hdev->le_pkts;
2294 di.sco_mtu = 0;
2295 di.sco_pkts = 0;
2296 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 di.link_policy = hdev->link_policy;
2298 di.link_mode = hdev->link_mode;
2299
2300 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2301 memcpy(&di.features, &hdev->features, sizeof(di.features));
2302
2303 if (copy_to_user(arg, &di, sizeof(di)))
2304 err = -EFAULT;
2305
2306 hci_dev_put(hdev);
2307
2308 return err;
2309}
2310
2311/* ---- Interface to HCI drivers ---- */
2312
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002313static int hci_rfkill_set_block(void *data, bool blocked)
2314{
2315 struct hci_dev *hdev = data;
2316
2317 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2318
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002319 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2320 return -EBUSY;
2321
Johan Hedberg5e130362013-09-13 08:58:17 +03002322 if (blocked) {
2323 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002324 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2325 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002326 } else {
2327 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002328 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002329
2330 return 0;
2331}
2332
2333static const struct rfkill_ops hci_rfkill_ops = {
2334 .set_block = hci_rfkill_set_block,
2335};
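/* Illustrative sketch (assumed to mirror the registration path later
 * in this file): the ops above are wired up with rfkill_alloc(), e.g.:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 */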
2336
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002337static void hci_power_on(struct work_struct *work)
2338{
2339 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002340 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002341
2342 BT_DBG("%s", hdev->name);
2343
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002344 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002345 if (err < 0) {
2346 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002347 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002348 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002349
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002350 /* During the HCI setup phase, a few error conditions are
2351 * ignored and they need to be checked now. If they are still
2352 * valid, it is important to turn the device back off.
2353 */
2354 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2355 (hdev->dev_type == HCI_BREDR &&
2356 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2357 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002358 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2359 hci_dev_do_close(hdev);
2360 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002361 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2362 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002363 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002364
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002365 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002366 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002367}
2368
2369static void hci_power_off(struct work_struct *work)
2370{
Johan Hedberg32435532011-11-07 22:16:04 +02002371 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002372 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002373
2374 BT_DBG("%s", hdev->name);
2375
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002376 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002377}
2378
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002379static void hci_discov_off(struct work_struct *work)
2380{
2381 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002382
2383 hdev = container_of(work, struct hci_dev, discov_off.work);
2384
2385 BT_DBG("%s", hdev->name);
2386
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002387 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002388}
2389
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002390int hci_uuids_clear(struct hci_dev *hdev)
2391{
Johan Hedberg48210022013-01-27 00:31:28 +02002392 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002393
Johan Hedberg48210022013-01-27 00:31:28 +02002394 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2395 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002396 kfree(uuid);
2397 }
2398
2399 return 0;
2400}
2401
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002402int hci_link_keys_clear(struct hci_dev *hdev)
2403{
2404 struct list_head *p, *n;
2405
2406 list_for_each_safe(p, n, &hdev->link_keys) {
2407 struct link_key *key;
2408
2409 key = list_entry(p, struct link_key, list);
2410
2411 list_del(p);
2412 kfree(key);
2413 }
2414
2415 return 0;
2416}
2417
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002418int hci_smp_ltks_clear(struct hci_dev *hdev)
2419{
2420 struct smp_ltk *k, *tmp;
2421
2422 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2423 list_del(&k->list);
2424 kfree(k);
2425 }
2426
2427 return 0;
2428}
2429
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002430struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2431{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002432 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002433
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002434 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002435 if (bacmp(bdaddr, &k->bdaddr) == 0)
2436 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002437
2438 return NULL;
2439}
2440
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302441static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002442 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002443{
2444 /* Legacy key */
2445 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302446 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002447
2448 /* Debug keys are insecure so don't store them persistently */
2449 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302450 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002451
2452 /* Changed combination key and there's no previous one */
2453 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302454 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002455
2456 /* Security mode 3 case */
2457 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302458 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002459
2460 /* Neither local nor remote side had no-bonding as requirement */
2461 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302462 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002463
2464 /* Local side had dedicated bonding as requirement */
2465 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302466 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002467
2468 /* Remote side had dedicated bonding as requirement */
2469 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302470 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002471
2472 /* If none of the above criteria match, then don't store the key
2473 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302474 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002475}
2476
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002477struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002478{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002479 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002480
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002481 list_for_each_entry(k, &hdev->long_term_keys, list) {
2482 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002483 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002484 continue;
2485
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002486 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002487 }
2488
2489 return NULL;
2490}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002491
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002492struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002493 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002494{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002495 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002496
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002497 list_for_each_entry(k, &hdev->long_term_keys, list)
2498 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002499 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002500 return k;
2501
2502 return NULL;
2503}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002504
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002505int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002506 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002507{
2508 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302509 u8 old_key_type;
2510 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002511
2512 old_key = hci_find_link_key(hdev, bdaddr);
2513 if (old_key) {
2514 old_key_type = old_key->type;
2515 key = old_key;
2516 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002517 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002518 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2519 if (!key)
2520 return -ENOMEM;
2521 list_add(&key->list, &hdev->link_keys);
2522 }
2523
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002524 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002525
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002526 /* Some buggy controller combinations generate a changed
2527 * combination key for legacy pairing even when there's no
2528 * previous key */
2529 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002530 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002531 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002532 if (conn)
2533 conn->key_type = type;
2534 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002535
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002536 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002537 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002538 key->pin_len = pin_len;
2539
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002540 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002541 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002542 else
2543 key->type = type;
2544
Johan Hedberg4df378a2011-04-28 11:29:03 -07002545 if (!new_key)
2546 return 0;
2547
2548 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2549
Johan Hedberg744cf192011-11-08 20:40:14 +02002550 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002551
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302552 if (conn)
2553 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002554
2555 return 0;
2556}
2557
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002558int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002559 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002560 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002561{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002562 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002563
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002564 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2565 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002566
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002567 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2568 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002569 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002570 else {
2571 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002572 if (!key)
2573 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002574 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002575 }
2576
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002577 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002578 key->bdaddr_type = addr_type;
2579 memcpy(key->val, tk, sizeof(key->val));
2580 key->authenticated = authenticated;
2581 key->ediv = ediv;
2582 key->enc_size = enc_size;
2583 key->type = type;
2584 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002585
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002586 if (!new_key)
2587 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002588
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002589 if (type & HCI_SMP_LTK)
2590 mgmt_new_ltk(hdev, key, 1);
2591
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002592 return 0;
2593}
2594
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002595int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2596{
2597 struct link_key *key;
2598
2599 key = hci_find_link_key(hdev, bdaddr);
2600 if (!key)
2601 return -ENOENT;
2602
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002603 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002604
2605 list_del(&key->list);
2606 kfree(key);
2607
2608 return 0;
2609}
2610
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002611int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2612{
2613 struct smp_ltk *k, *tmp;
2614
2615 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2616 if (bacmp(bdaddr, &k->bdaddr))
2617 continue;
2618
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002619 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002620
2621 list_del(&k->list);
2622 kfree(k);
2623 }
2624
2625 return 0;
2626}
2627
Ville Tervo6bd32322011-02-16 16:32:41 +02002628/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002629static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002630{
2631 struct hci_dev *hdev = (void *) arg;
2632
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002633 if (hdev->sent_cmd) {
2634 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2635 u16 opcode = __le16_to_cpu(sent->opcode);
2636
2637 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2638 } else {
2639 BT_ERR("%s command tx timeout", hdev->name);
2640 }
2641
Ville Tervo6bd32322011-02-16 16:32:41 +02002642 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002643 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002644}
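/* Illustrative sketch (assumption, not shown in this section): the
 * timer armed against this handler is typically refreshed whenever a
 * command is sent, along the lines of:
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 */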
2645
Szymon Janc2763eda2011-03-22 13:12:22 +01002646struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002647 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002648{
2649 struct oob_data *data;
2650
2651 list_for_each_entry(data, &hdev->remote_oob_data, list)
2652 if (bacmp(bdaddr, &data->bdaddr) == 0)
2653 return data;
2654
2655 return NULL;
2656}
2657
2658int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2659{
2660 struct oob_data *data;
2661
2662 data = hci_find_remote_oob_data(hdev, bdaddr);
2663 if (!data)
2664 return -ENOENT;
2665
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002666 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002667
2668 list_del(&data->list);
2669 kfree(data);
2670
2671 return 0;
2672}
2673
2674int hci_remote_oob_data_clear(struct hci_dev *hdev)
2675{
2676 struct oob_data *data, *n;
2677
2678 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2679 list_del(&data->list);
2680 kfree(data);
2681 }
2682
2683 return 0;
2684}
2685
2686int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002687 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002688{
2689 struct oob_data *data;
2690
2691 data = hci_find_remote_oob_data(hdev, bdaddr);
2692
2693 if (!data) {
2694 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2695 if (!data)
2696 return -ENOMEM;
2697
2698 bacpy(&data->bdaddr, bdaddr);
2699 list_add(&data->list, &hdev->remote_oob_data);
2700 }
2701
2702 memcpy(data->hash, hash, sizeof(data->hash));
2703 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2704
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002705 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002706
2707 return 0;
2708}
2709
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002710struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2711 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002712{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002713 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002714
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002715 list_for_each_entry(b, &hdev->blacklist, list) {
2716 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002717 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002718 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002719
2720 return NULL;
2721}
2722
2723int hci_blacklist_clear(struct hci_dev *hdev)
2724{
2725 struct list_head *p, *n;
2726
2727 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002728 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002729
2730 list_del(p);
2731 kfree(b);
2732 }
2733
2734 return 0;
2735}
2736
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002737int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002738{
2739 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002740
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002741 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002742 return -EBADF;
2743
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002744 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002745 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002746
2747 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002748 if (!entry)
2749 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002750
2751 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002752 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002753
2754 list_add(&entry->list, &hdev->blacklist);
2755
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002756 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002757}
2758
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002759int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002760{
2761 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002762
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002763 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002764 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002765
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002766 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002767 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002768 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002769
2770 list_del(&entry->list);
2771 kfree(entry);
2772
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002773 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002774}
2775
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002776static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002777{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002778 if (status) {
2779 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002780
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002781 hci_dev_lock(hdev);
2782 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2783 hci_dev_unlock(hdev);
2784 return;
2785 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002786}
2787
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002788static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002789{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002790 /* General inquiry access code (GIAC) */
2791 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2792 struct hci_request req;
2793 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002794 int err;
2795
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002796 if (status) {
2797 BT_ERR("Failed to disable LE scanning: status %d", status);
2798 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002799 }
2800
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002801 switch (hdev->discovery.type) {
2802 case DISCOV_TYPE_LE:
2803 hci_dev_lock(hdev);
2804 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2805 hci_dev_unlock(hdev);
2806 break;
2807
2808 case DISCOV_TYPE_INTERLEAVED:
2809 hci_req_init(&req, hdev);
2810
2811 memset(&cp, 0, sizeof(cp));
2812 memcpy(&cp.lap, lap, sizeof(cp.lap));
2813 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2815
2816 hci_dev_lock(hdev);
2817
2818 hci_inquiry_cache_flush(hdev);
2819
2820 err = hci_req_run(&req, inquiry_complete);
2821 if (err) {
2822 BT_ERR("Inquiry request failed: err %d", err);
2823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2824 }
2825
2826 hci_dev_unlock(hdev);
2827 break;
2828 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002829}
2830
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002831static void le_scan_disable_work(struct work_struct *work)
2832{
2833 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002834 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002835 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002836 struct hci_request req;
2837 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002838
2839 BT_DBG("%s", hdev->name);
2840
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002841 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002842
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002843 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002844 cp.enable = LE_SCAN_DISABLE;
2845 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002846
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002847 err = hci_req_run(&req, le_scan_disable_work_complete);
2848 if (err)
2849 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002850}
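/* Illustrative sketch (assumption, not shown in this section): the
 * delayed work above is scheduled when an LE scan is started, e.g.:
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   msecs_to_jiffies(timeout));
 */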
2851
David Herrmann9be0dab2012-04-22 14:39:57 +02002852/* Alloc HCI device */
2853struct hci_dev *hci_alloc_dev(void)
2854{
2855 struct hci_dev *hdev;
2856
2857 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2858 if (!hdev)
2859 return NULL;
2860
David Herrmannb1b813d2012-04-22 14:39:58 +02002861 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2862 hdev->esco_type = (ESCO_HV1);
2863 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002864	hdev->num_iac = 0x01;		/* Support for one IAC is mandatory */
2865 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002866 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2867 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002868
David Herrmannb1b813d2012-04-22 14:39:58 +02002869 hdev->sniff_max_interval = 800;
2870 hdev->sniff_min_interval = 80;
2871
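	/* LE defaults: scan interval/window of 0x0060/0x0030 are 60 ms
	 * and 30 ms in the spec's 0.625 ms units; the connection
	 * interval range 0x0028-0x0038 is 50-70 ms in 1.25 ms units.
	 */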
Marcel Holtmannbef64732013-10-11 08:23:19 -07002872 hdev->le_scan_interval = 0x0060;
2873 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002874 hdev->le_conn_min_interval = 0x0028;
2875 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002876
David Herrmannb1b813d2012-04-22 14:39:58 +02002877 mutex_init(&hdev->lock);
2878 mutex_init(&hdev->req_lock);
2879
2880 INIT_LIST_HEAD(&hdev->mgmt_pending);
2881 INIT_LIST_HEAD(&hdev->blacklist);
2882 INIT_LIST_HEAD(&hdev->uuids);
2883 INIT_LIST_HEAD(&hdev->link_keys);
2884 INIT_LIST_HEAD(&hdev->long_term_keys);
2885 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002886 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002887
2888 INIT_WORK(&hdev->rx_work, hci_rx_work);
2889 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2890 INIT_WORK(&hdev->tx_work, hci_tx_work);
2891 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002892
David Herrmannb1b813d2012-04-22 14:39:58 +02002893 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2894 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2895 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2896
David Herrmannb1b813d2012-04-22 14:39:58 +02002897 skb_queue_head_init(&hdev->rx_q);
2898 skb_queue_head_init(&hdev->cmd_q);
2899 skb_queue_head_init(&hdev->raw_q);
2900
2901 init_waitqueue_head(&hdev->req_wait_q);
2902
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002903 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002904
David Herrmannb1b813d2012-04-22 14:39:58 +02002905 hci_init_sysfs(hdev);
2906 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002907
2908 return hdev;
2909}
2910EXPORT_SYMBOL(hci_alloc_dev);
2911
2912/* Free HCI device */
2913void hci_free_dev(struct hci_dev *hdev)
2914{
David Herrmann9be0dab2012-04-22 14:39:57 +02002915	/* Will be freed via the device release callback */
2916 put_device(&hdev->dev);
2917}
2918EXPORT_SYMBOL(hci_free_dev);
2919
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920/* Register HCI device */
2921int hci_register_dev(struct hci_dev *hdev)
2922{
David Herrmannb1b813d2012-04-22 14:39:58 +02002923 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
David Herrmann010666a2012-01-07 15:47:07 +01002925 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 return -EINVAL;
2927
Mat Martineau08add512011-11-02 16:18:36 -07002928 /* Do not allow HCI_AMP devices to register at index 0,
2929 * so the index can be used as the AMP controller ID.
2930 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002931 switch (hdev->dev_type) {
2932 case HCI_BREDR:
2933 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2934 break;
2935 case HCI_AMP:
2936 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2937 break;
2938 default:
2939 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002941
Sasha Levin3df92b32012-05-27 22:36:56 +02002942 if (id < 0)
2943 return id;
2944
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 sprintf(hdev->name, "hci%d", id);
2946 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002947
2948 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2949
Kees Cookd8537542013-07-03 15:04:57 -07002950 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2951 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002952 if (!hdev->workqueue) {
2953 error = -ENOMEM;
2954 goto err;
2955 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002956
Kees Cookd8537542013-07-03 15:04:57 -07002957 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2958 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002959 if (!hdev->req_workqueue) {
2960 destroy_workqueue(hdev->workqueue);
2961 error = -ENOMEM;
2962 goto err;
2963 }
2964
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002965 if (!IS_ERR_OR_NULL(bt_debugfs))
2966 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2967
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002968 dev_set_name(&hdev->dev, "%s", hdev->name);
2969
2970 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002971 if (error < 0)
2972 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002974 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002975 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2976 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002977 if (hdev->rfkill) {
2978 if (rfkill_register(hdev->rfkill) < 0) {
2979 rfkill_destroy(hdev->rfkill);
2980 hdev->rfkill = NULL;
2981 }
2982 }
2983
Johan Hedberg5e130362013-09-13 08:58:17 +03002984 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2985 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2986
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002987 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002988 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002989
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002990 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002991 /* Assume BR/EDR support until proven otherwise (such as
2992	 * through reading supported features during init).
2993 */
2994 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2995 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002996
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002997 write_lock(&hci_dev_list_lock);
2998 list_add(&hdev->list, &hci_dev_list);
2999 write_unlock(&hci_dev_list_lock);
3000
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003002 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003
Johan Hedberg19202572013-01-14 22:33:51 +02003004 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003005
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003007
David Herrmann33ca9542011-10-08 14:58:49 +02003008err_wqueue:
3009 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003010 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003011err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003012 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003013
David Herrmann33ca9542011-10-08 14:58:49 +02003014 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015}
3016EXPORT_SYMBOL(hci_register_dev);
3017
3018/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003019void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020{
Sasha Levin3df92b32012-05-27 22:36:56 +02003021 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003022
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003023 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024
Johan Hovold94324962012-03-15 14:48:41 +01003025 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3026
Sasha Levin3df92b32012-05-27 22:36:56 +02003027 id = hdev->id;
3028
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003029 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003031 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032
3033 hci_dev_do_close(hdev);
3034
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303035 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003036 kfree_skb(hdev->reassembly[i]);
3037
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003038 cancel_work_sync(&hdev->power_on);
3039
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003040 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003041 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003042 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003043 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003044 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003045 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003046
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003047 /* mgmt_index_removed should take care of emptying the
3048	 * pending list.
	 */
3049 BUG_ON(!list_empty(&hdev->mgmt_pending));
3050
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 hci_notify(hdev, HCI_DEV_UNREG);
3052
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003053 if (hdev->rfkill) {
3054 rfkill_unregister(hdev->rfkill);
3055 rfkill_destroy(hdev->rfkill);
3056 }
3057
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003058 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003059
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003060 debugfs_remove_recursive(hdev->debugfs);
3061
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003062 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003063 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003064
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003065 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003066 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003067 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003068 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003069 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003070 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003071 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003072
David Herrmanndc946bd2012-01-07 15:47:24 +01003073 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003074
3075 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076}
3077EXPORT_SYMBOL(hci_unregister_dev);
3078
3079/* Suspend HCI device */
3080int hci_suspend_dev(struct hci_dev *hdev)
3081{
3082 hci_notify(hdev, HCI_DEV_SUSPEND);
3083 return 0;
3084}
3085EXPORT_SYMBOL(hci_suspend_dev);
3086
3087/* Resume HCI device */
3088int hci_resume_dev(struct hci_dev *hdev)
3089{
3090 hci_notify(hdev, HCI_DEV_RESUME);
3091 return 0;
3092}
3093EXPORT_SYMBOL(hci_resume_dev);
3094
Marcel Holtmann76bca882009-11-18 00:40:39 +01003095/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003096int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003097{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003098	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003099		      !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003100 kfree_skb(skb);
3101 return -ENXIO;
3102 }
3103
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003104 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003105 bt_cb(skb)->incoming = 1;
3106
3107 /* Time stamp */
3108 __net_timestamp(skb);
3109
Marcel Holtmann76bca882009-11-18 00:40:39 +01003110 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003111 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003112
Marcel Holtmann76bca882009-11-18 00:40:39 +01003113 return 0;
3114}
3115EXPORT_SYMBOL(hci_recv_frame);
3116
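/* Reassemble a possibly fragmented packet of the given type into
 * hdev->reassembly[index]. Once a full frame has been accumulated it is
 * handed to hci_recv_frame(); the return value is the number of input
 * bytes not yet consumed, or a negative error code.
 */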
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303117static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003118 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303119{
3120 int len = 0;
3121 int hlen = 0;
3122 int remain = count;
3123 struct sk_buff *skb;
3124 struct bt_skb_cb *scb;
3125
3126 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003127 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303128 return -EILSEQ;
3129
3130 skb = hdev->reassembly[index];
3131
3132 if (!skb) {
3133 switch (type) {
3134 case HCI_ACLDATA_PKT:
3135 len = HCI_MAX_FRAME_SIZE;
3136 hlen = HCI_ACL_HDR_SIZE;
3137 break;
3138 case HCI_EVENT_PKT:
3139 len = HCI_MAX_EVENT_SIZE;
3140 hlen = HCI_EVENT_HDR_SIZE;
3141 break;
3142 case HCI_SCODATA_PKT:
3143 len = HCI_MAX_SCO_SIZE;
3144 hlen = HCI_SCO_HDR_SIZE;
3145 break;
3146 }
3147
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003148 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303149 if (!skb)
3150 return -ENOMEM;
3151
3152 scb = (void *) skb->cb;
3153 scb->expect = hlen;
3154 scb->pkt_type = type;
3155
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303156 hdev->reassembly[index] = skb;
3157 }
3158
3159 while (count) {
3160 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003161 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303162
3163 memcpy(skb_put(skb, len), data, len);
3164
3165 count -= len;
3166 data += len;
3167 scb->expect -= len;
3168 remain = count;
3169
3170 switch (type) {
3171 case HCI_EVENT_PKT:
3172 if (skb->len == HCI_EVENT_HDR_SIZE) {
3173 struct hci_event_hdr *h = hci_event_hdr(skb);
3174 scb->expect = h->plen;
3175
3176 if (skb_tailroom(skb) < scb->expect) {
3177 kfree_skb(skb);
3178 hdev->reassembly[index] = NULL;
3179 return -ENOMEM;
3180 }
3181 }
3182 break;
3183
3184 case HCI_ACLDATA_PKT:
3185 if (skb->len == HCI_ACL_HDR_SIZE) {
3186 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3187 scb->expect = __le16_to_cpu(h->dlen);
3188
3189 if (skb_tailroom(skb) < scb->expect) {
3190 kfree_skb(skb);
3191 hdev->reassembly[index] = NULL;
3192 return -ENOMEM;
3193 }
3194 }
3195 break;
3196
3197 case HCI_SCODATA_PKT:
3198 if (skb->len == HCI_SCO_HDR_SIZE) {
3199 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3200 scb->expect = h->dlen;
3201
3202 if (skb_tailroom(skb) < scb->expect) {
3203 kfree_skb(skb);
3204 hdev->reassembly[index] = NULL;
3205 return -ENOMEM;
3206 }
3207 }
3208 break;
3209 }
3210
3211 if (scb->expect == 0) {
3212 /* Complete frame */
3213
3214 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003215 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303216
3217 hdev->reassembly[index] = NULL;
3218 return remain;
3219 }
3220 }
3221
3222 return remain;
3223}
3224
Marcel Holtmannef222012007-07-11 06:42:04 +02003225int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3226{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303227 int rem = 0;
3228
Marcel Holtmannef222012007-07-11 06:42:04 +02003229 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3230 return -EILSEQ;
3231
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003232 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003233 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303234 if (rem < 0)
3235 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003236
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303237 data += (count - rem);
3238 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003239 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003240
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303241 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003242}
3243EXPORT_SYMBOL(hci_recv_fragment);
3244
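/* Byte-stream transports do not delimit packets; the first byte of each
 * frame carries the packet type, which is peeled off here before the
 * remainder goes through the common reassembly path.
 */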
Suraj Sumangala99811512010-07-14 13:02:19 +05303245#define STREAM_REASSEMBLY 0
3246
3247int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3248{
3249 int type;
3250 int rem = 0;
3251
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003252 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303253 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3254
3255 if (!skb) {
3256 struct { char type; } *pkt;
3257
3258 /* Start of the frame */
3259 pkt = data;
3260 type = pkt->type;
3261
3262 data++;
3263 count--;
3264 } else
3265 type = bt_cb(skb)->pkt_type;
3266
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003267 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003268 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303269 if (rem < 0)
3270 return rem;
3271
3272 data += (count - rem);
3273 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003274 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303275
3276 return rem;
3277}
3278EXPORT_SYMBOL(hci_recv_stream_fragment);
3279
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280/* ---- Interface to upper protocols ---- */
3281
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282int hci_register_cb(struct hci_cb *cb)
3283{
3284 BT_DBG("%p name %s", cb, cb->name);
3285
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003286 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003288 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
3290 return 0;
3291}
3292EXPORT_SYMBOL(hci_register_cb);
3293
3294int hci_unregister_cb(struct hci_cb *cb)
3295{
3296 BT_DBG("%p name %s", cb, cb->name);
3297
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003298 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003300 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301
3302 return 0;
3303}
3304EXPORT_SYMBOL(hci_unregister_cb);
3305
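/* Hand a single frame to the driver: timestamp it, mirror it to the
 * monitor and (in promiscuous mode) the HCI sockets, drop the socket
 * owner reference and invoke the driver's send callback.
 */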
Marcel Holtmann51086992013-10-10 14:54:19 -07003306static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003308 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003310 /* Time stamp */
3311 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003313 /* Send copy to monitor */
3314 hci_send_to_monitor(hdev, skb);
3315
3316 if (atomic_read(&hdev->promisc)) {
3317 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003318 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 }
3320
3321 /* Get rid of skb owner, prior to sending to the driver. */
3322 skb_orphan(skb);
3323
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003324 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003325 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326}
3327
Johan Hedberg3119ae92013-03-05 20:37:44 +02003328void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3329{
3330 skb_queue_head_init(&req->cmd_q);
3331 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003332 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003333}
3334
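/* Submit a built request: the completion callback is attached to the
 * last queued command, the whole batch is spliced onto hdev->cmd_q and
 * the command worker is kicked. Empty requests are rejected.
 */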
3335int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3336{
3337 struct hci_dev *hdev = req->hdev;
3338 struct sk_buff *skb;
3339 unsigned long flags;
3340
3341 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3342
Andre Guedes5d73e032013-03-08 11:20:16 -03003343	/* If an error occurred during request building, remove all HCI
3344 * commands queued on the HCI request queue.
3345 */
3346 if (req->err) {
3347 skb_queue_purge(&req->cmd_q);
3348 return req->err;
3349 }
3350
Johan Hedberg3119ae92013-03-05 20:37:44 +02003351 /* Do not allow empty requests */
3352 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003353 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003354
3355 skb = skb_peek_tail(&req->cmd_q);
3356 bt_cb(skb)->req.complete = complete;
3357
3358 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3359 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3360 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3361
3362 queue_work(hdev->workqueue, &hdev->cmd_work);
3363
3364 return 0;
3365}
3366
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003367static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003368 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369{
3370 int len = HCI_COMMAND_HDR_SIZE + plen;
3371 struct hci_command_hdr *hdr;
3372 struct sk_buff *skb;
3373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003375 if (!skb)
3376 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377
3378 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003379 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 hdr->plen = plen;
3381
3382 if (plen)
3383 memcpy(skb_put(skb, plen), param, plen);
3384
3385 BT_DBG("skb len %d", skb->len);
3386
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003387 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003388
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003389 return skb;
3390}
3391
3392/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003393int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3394 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003395{
3396 struct sk_buff *skb;
3397
3398 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3399
3400 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3401 if (!skb) {
3402 BT_ERR("%s no memory for command", hdev->name);
3403 return -ENOMEM;
3404 }
3405
Johan Hedberg11714b32013-03-05 20:37:47 +02003406	/* Stand-alone HCI commands must be flagged as
3407 * single-command requests.
3408 */
3409 bt_cb(skb)->req.start = true;
3410
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003412 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413
3414 return 0;
3415}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416
Johan Hedberg71c76a12013-03-05 20:37:46 +02003417/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003418void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3419 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003420{
3421 struct hci_dev *hdev = req->hdev;
3422 struct sk_buff *skb;
3423
3424 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3425
Andre Guedes34739c12013-03-08 11:20:18 -03003426	/* If an error occurred during request building, there is no point in
3427 * queueing the HCI command. We can simply return.
3428 */
3429 if (req->err)
3430 return;
3431
Johan Hedberg71c76a12013-03-05 20:37:46 +02003432 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3433 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003434 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3435 hdev->name, opcode);
3436 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003437 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003438 }
3439
3440 if (skb_queue_empty(&req->cmd_q))
3441 bt_cb(skb)->req.start = true;
3442
Johan Hedberg02350a72013-04-03 21:50:29 +03003443 bt_cb(skb)->req.event = event;
3444
Johan Hedberg71c76a12013-03-05 20:37:46 +02003445 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003446}
3447
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003448void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3449 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003450{
3451 hci_req_add_ev(req, opcode, plen, param, 0);
3452}
3453
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003455void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456{
3457 struct hci_command_hdr *hdr;
3458
3459 if (!hdev->sent_cmd)
3460 return NULL;
3461
3462 hdr = (void *) hdev->sent_cmd->data;
3463
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003464 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 return NULL;
3466
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003467 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468
3469 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3470}
3471
3472/* Send ACL data */
3473static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3474{
3475 struct hci_acl_hdr *hdr;
3476 int len = skb->len;
3477
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003478 skb_push(skb, HCI_ACL_HDR_SIZE);
3479 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003480 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003481 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3482 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483}
3484
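/* Add ACL headers and queue an outgoing skb on the channel. BR/EDR
 * controllers address the connection handle, while AMP controllers use
 * the logical channel handle.
 */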
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003485static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003486 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003488 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489 struct hci_dev *hdev = conn->hdev;
3490 struct sk_buff *list;
3491
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003492 skb->len = skb_headlen(skb);
3493 skb->data_len = 0;
3494
3495 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003496
3497 switch (hdev->dev_type) {
3498 case HCI_BREDR:
3499 hci_add_acl_hdr(skb, conn->handle, flags);
3500 break;
3501 case HCI_AMP:
3502 hci_add_acl_hdr(skb, chan->handle, flags);
3503 break;
3504 default:
3505 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3506 return;
3507 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003508
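	/* Fragments of a large skb travel on its frag_list; they are
	 * queued atomically with ACL_CONT set so the scheduler keeps
	 * the frame contiguous on the wire.
	 */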
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003509 list = skb_shinfo(skb)->frag_list;
3510 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511		/* Non-fragmented */
3512 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3513
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003514 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 } else {
3516 /* Fragmented */
3517 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3518
3519 skb_shinfo(skb)->frag_list = NULL;
3520
3521 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003522 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003524 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003525
3526 flags &= ~ACL_START;
3527 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 do {
3529			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003530
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003531 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003532 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533
3534 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3535
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003536 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537 } while (list);
3538
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003539 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003541}
3542
3543void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3544{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003545 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003546
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003547 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003548
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003549 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003551 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553
3554/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003555void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556{
3557 struct hci_dev *hdev = conn->hdev;
3558 struct hci_sco_hdr hdr;
3559
3560 BT_DBG("%s len %d", hdev->name, skb->len);
3561
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003562 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 hdr.dlen = skb->len;
3564
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003565 skb_push(skb, HCI_SCO_HDR_SIZE);
3566 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003567 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003569 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003570
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003572 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574
3575/* ---- HCI TX task (outgoing data) ---- */
3576
3577/* HCI Connection scheduler */
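/* Pick the connection of the given type with the least unacknowledged
 * data and grant it an equal share of the controller's free buffers
 * (at least one packet).
 */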
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003578static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3579 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580{
3581 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003582 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003583 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003585	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003587
3588 rcu_read_lock();
3589
3590 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003591 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003593
3594 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3595 continue;
3596
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597 num++;
3598
3599 if (c->sent < min) {
3600 min = c->sent;
3601 conn = c;
3602 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003603
3604 if (hci_conn_num(hdev, type) == num)
3605 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 }
3607
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003608 rcu_read_unlock();
3609
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003611 int cnt, q;
3612
3613 switch (conn->type) {
3614 case ACL_LINK:
3615 cnt = hdev->acl_cnt;
3616 break;
3617 case SCO_LINK:
3618 case ESCO_LINK:
3619 cnt = hdev->sco_cnt;
3620 break;
3621 case LE_LINK:
3622 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3623 break;
3624 default:
3625 cnt = 0;
3626 BT_ERR("Unknown link type");
3627 }
3628
3629 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630 *quote = q ? q : 1;
3631 } else
3632 *quote = 0;
3633
3634 BT_DBG("conn %p quote %d", conn, *quote);
3635 return conn;
3636}
3637
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003638static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639{
3640 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003641 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642
Ville Tervobae1f5d92011-02-10 22:38:53 -03003643 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003645 rcu_read_lock();
3646
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003648 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003649 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003650 BT_ERR("%s killing stalled connection %pMR",
3651 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003652 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 }
3654 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003655
3656 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657}
3658
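/* Channel-level scheduler: among channels of the given link type, pick
 * the one whose head skb has the highest priority, breaking ties toward
 * the connection with the least data in flight, and compute its quota.
 */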
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003659static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3660 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003661{
3662 struct hci_conn_hash *h = &hdev->conn_hash;
3663 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003664 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003665 struct hci_conn *conn;
3666 int cnt, q, conn_num = 0;
3667
3668 BT_DBG("%s", hdev->name);
3669
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003670 rcu_read_lock();
3671
3672 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003673 struct hci_chan *tmp;
3674
3675 if (conn->type != type)
3676 continue;
3677
3678 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3679 continue;
3680
3681 conn_num++;
3682
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003683 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003684 struct sk_buff *skb;
3685
3686 if (skb_queue_empty(&tmp->data_q))
3687 continue;
3688
3689 skb = skb_peek(&tmp->data_q);
3690 if (skb->priority < cur_prio)
3691 continue;
3692
3693 if (skb->priority > cur_prio) {
3694 num = 0;
3695 min = ~0;
3696 cur_prio = skb->priority;
3697 }
3698
3699 num++;
3700
3701 if (conn->sent < min) {
3702 min = conn->sent;
3703 chan = tmp;
3704 }
3705 }
3706
3707 if (hci_conn_num(hdev, type) == conn_num)
3708 break;
3709 }
3710
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003711 rcu_read_unlock();
3712
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003713 if (!chan)
3714 return NULL;
3715
3716 switch (chan->conn->type) {
3717 case ACL_LINK:
3718 cnt = hdev->acl_cnt;
3719 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003720 case AMP_LINK:
3721 cnt = hdev->block_cnt;
3722 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003723 case SCO_LINK:
3724 case ESCO_LINK:
3725 cnt = hdev->sco_cnt;
3726 break;
3727 case LE_LINK:
3728 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3729 break;
3730 default:
3731 cnt = 0;
3732 BT_ERR("Unknown link type");
3733 }
3734
3735 q = cnt / num;
3736 *quote = q ? q : 1;
3737 BT_DBG("chan %p quote %d", chan, *quote);
3738 return chan;
3739}
3740
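/* Anti-starvation pass run after a TX round: per-round counters of
 * channels that got to send are reset, and the head skb of channels
 * that did not is promoted to just below the maximum priority.
 */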
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003741static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3742{
3743 struct hci_conn_hash *h = &hdev->conn_hash;
3744 struct hci_conn *conn;
3745 int num = 0;
3746
3747 BT_DBG("%s", hdev->name);
3748
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003749 rcu_read_lock();
3750
3751 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003752 struct hci_chan *chan;
3753
3754 if (conn->type != type)
3755 continue;
3756
3757 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3758 continue;
3759
3760 num++;
3761
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003762 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003763 struct sk_buff *skb;
3764
3765 if (chan->sent) {
3766 chan->sent = 0;
3767 continue;
3768 }
3769
3770 if (skb_queue_empty(&chan->data_q))
3771 continue;
3772
3773 skb = skb_peek(&chan->data_q);
3774 if (skb->priority >= HCI_PRIO_MAX - 1)
3775 continue;
3776
3777 skb->priority = HCI_PRIO_MAX - 1;
3778
3779 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003780 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003781 }
3782
3783 if (hci_conn_num(hdev, type) == num)
3784 break;
3785 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003786
3787 rcu_read_unlock();
3788
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003789}
3790
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003791static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3792{
3793 /* Calculate count of blocks used by this packet */
3794 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3795}
3796
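/* Watchdog for stalled ACL links: if the controller has returned no
 * buffer credits within HCI_ACL_TX_TIMEOUT of the last transmission,
 * kill connections with unacknowledged packets.
 */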
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003797static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 if (!test_bit(HCI_RAW, &hdev->flags)) {
3800		/* ACL tx timeout must be longer than the maximum
3801		 * link supervision timeout (40.9 seconds). */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003802 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003803 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003804 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003806}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003808static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003809{
3810 unsigned int cnt = hdev->acl_cnt;
3811 struct hci_chan *chan;
3812 struct sk_buff *skb;
3813 int quote;
3814
3815 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003816
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003817 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003818 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003819 u32 priority = (skb_peek(&chan->data_q))->priority;
3820 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003821 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003822 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003823
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003824 /* Stop if priority has changed */
3825 if (skb->priority < priority)
3826 break;
3827
3828 skb = skb_dequeue(&chan->data_q);
3829
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003830 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003831 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003832
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003833 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 hdev->acl_last_tx = jiffies;
3835
3836 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003837 chan->sent++;
3838 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 }
3840 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003841
3842 if (cnt != hdev->acl_cnt)
3843 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844}
3845
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003846static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003847{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003848 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003849 struct hci_chan *chan;
3850 struct sk_buff *skb;
3851 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003852 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003853
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003854 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003855
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003856 BT_DBG("%s", hdev->name);
3857
3858 if (hdev->dev_type == HCI_AMP)
3859 type = AMP_LINK;
3860 else
3861 type = ACL_LINK;
3862
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003863 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003864 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003865 u32 priority = (skb_peek(&chan->data_q))->priority;
3866 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3867 int blocks;
3868
3869 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003870 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003871
3872 /* Stop if priority has changed */
3873 if (skb->priority < priority)
3874 break;
3875
3876 skb = skb_dequeue(&chan->data_q);
3877
3878 blocks = __get_blocks(hdev, skb);
3879 if (blocks > hdev->block_cnt)
3880 return;
3881
3882 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003883 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003884
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003885 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003886 hdev->acl_last_tx = jiffies;
3887
3888 hdev->block_cnt -= blocks;
3889 quote -= blocks;
3890
3891 chan->sent += blocks;
3892 chan->conn->sent += blocks;
3893 }
3894 }
3895
3896 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003897 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003898}
3899
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003900static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003901{
3902 BT_DBG("%s", hdev->name);
3903
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003904 /* No ACL link over BR/EDR controller */
3905 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3906 return;
3907
3908 /* No AMP link over AMP controller */
3909 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003910 return;
3911
3912 switch (hdev->flow_ctl_mode) {
3913 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3914 hci_sched_acl_pkt(hdev);
3915 break;
3916
3917 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3918 hci_sched_acl_blk(hdev);
3919 break;
3920 }
3921}
3922
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003924static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925{
3926 struct hci_conn *conn;
3927 struct sk_buff *skb;
3928 int quote;
3929
3930 BT_DBG("%s", hdev->name);
3931
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003932 if (!hci_conn_num(hdev, SCO_LINK))
3933 return;
3934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3936 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3937 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003938 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939
3940 conn->sent++;
3941 if (conn->sent == ~0)
3942 conn->sent = 0;
3943 }
3944 }
3945}
3946
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003947static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003948{
3949 struct hci_conn *conn;
3950 struct sk_buff *skb;
3951 int quote;
3952
3953 BT_DBG("%s", hdev->name);
3954
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003955 if (!hci_conn_num(hdev, ESCO_LINK))
3956 return;
3957
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003958 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3959 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003960 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3961 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003962 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003963
3964 conn->sent++;
3965 if (conn->sent == ~0)
3966 conn->sent = 0;
3967 }
3968 }
3969}
3970
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003971static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003972{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003973 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003974 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003975 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003976
3977 BT_DBG("%s", hdev->name);
3978
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003979 if (!hci_conn_num(hdev, LE_LINK))
3980 return;
3981
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003982 if (!test_bit(HCI_RAW, &hdev->flags)) {
3983		/* LE tx timeout must be longer than the maximum
3984		 * link supervision timeout (40.9 seconds). */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003985 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003986 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003987 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003988 }
3989
3990 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003991 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003992 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003993 u32 priority = (skb_peek(&chan->data_q))->priority;
3994 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003995 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003996 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003997
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003998 /* Stop if priority has changed */
3999 if (skb->priority < priority)
4000 break;
4001
4002 skb = skb_dequeue(&chan->data_q);
4003
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004004 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004005 hdev->le_last_tx = jiffies;
4006
4007 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004008 chan->sent++;
4009 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004010 }
4011 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004012
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004013 if (hdev->le_pkts)
4014 hdev->le_cnt = cnt;
4015 else
4016 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004017
4018 if (cnt != tmp)
4019 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004020}
4021
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004022static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004024 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 struct sk_buff *skb;
4026
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004027 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004028 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029
Marcel Holtmann52de5992013-09-03 18:08:38 -07004030 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4031 /* Schedule queues and send stuff to HCI driver */
4032 hci_sched_acl(hdev);
4033 hci_sched_sco(hdev);
4034 hci_sched_esco(hdev);
4035 hci_sched_le(hdev);
4036 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004037
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 /* Send next queued raw (unknown type) packet */
4039 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004040 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041}
4042
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004043/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044
4045/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004046static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047{
4048 struct hci_acl_hdr *hdr = (void *) skb->data;
4049 struct hci_conn *conn;
4050 __u16 handle, flags;
4051
4052 skb_pull(skb, HCI_ACL_HDR_SIZE);
4053
4054 handle = __le16_to_cpu(hdr->handle);
4055 flags = hci_flags(handle);
4056 handle = hci_handle(handle);
4057
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004058 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004059 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060
4061 hdev->stat.acl_rx++;
4062
4063 hci_dev_lock(hdev);
4064 conn = hci_conn_hash_lookup_handle(hdev, handle);
4065 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004066
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004068 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004069
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004071 l2cap_recv_acldata(conn, skb, flags);
4072 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004074 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004075 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 }
4077
4078 kfree_skb(skb);
4079}
4080
4081/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004082static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083{
4084 struct hci_sco_hdr *hdr = (void *) skb->data;
4085 struct hci_conn *conn;
4086 __u16 handle;
4087
4088 skb_pull(skb, HCI_SCO_HDR_SIZE);
4089
4090 handle = __le16_to_cpu(hdr->handle);
4091
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004092 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093
4094 hdev->stat.sco_rx++;
4095
4096 hci_dev_lock(hdev);
4097 conn = hci_conn_hash_lookup_handle(hdev, handle);
4098 hci_dev_unlock(hdev);
4099
4100 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004102 sco_recv_scodata(conn, skb);
4103 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004105 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004106 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107 }
4108
4109 kfree_skb(skb);
4110}
4111
Johan Hedberg9238f362013-03-05 20:37:48 +02004112static bool hci_req_is_complete(struct hci_dev *hdev)
4113{
4114 struct sk_buff *skb;
4115
4116 skb = skb_peek(&hdev->cmd_q);
4117 if (!skb)
4118 return true;
4119
4120 return bt_cb(skb)->req.start;
4121}
4122
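/* Requeue a clone of the last sent command so the command worker
 * retransmits it; HCI_Reset is deliberately never resent.
 */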
Johan Hedberg42c6b122013-03-05 20:37:49 +02004123static void hci_resend_last(struct hci_dev *hdev)
4124{
4125 struct hci_command_hdr *sent;
4126 struct sk_buff *skb;
4127 u16 opcode;
4128
4129 if (!hdev->sent_cmd)
4130 return;
4131
4132 sent = (void *) hdev->sent_cmd->data;
4133 opcode = __le16_to_cpu(sent->opcode);
4134 if (opcode == HCI_OP_RESET)
4135 return;
4136
4137 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4138 if (!skb)
4139 return;
4140
4141 skb_queue_head(&hdev->cmd_q, skb);
4142 queue_work(hdev->workqueue, &hdev->cmd_work);
4143}
4144
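/* Called from command complete/status handling: decide whether the
 * current request has finished and, if so, flush its remaining queued
 * commands and invoke the request's completion callback exactly once.
 */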
Johan Hedberg9238f362013-03-05 20:37:48 +02004145void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4146{
4147 hci_req_complete_t req_complete = NULL;
4148 struct sk_buff *skb;
4149 unsigned long flags;
4150
4151 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4152
Johan Hedberg42c6b122013-03-05 20:37:49 +02004153 /* If the completed command doesn't match the last one that was
4154	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004155 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004156 if (!hci_sent_cmd_data(hdev, opcode)) {
4157	/* Some CSR-based controllers generate a spontaneous
4158 * reset complete event during init and any pending
4159 * command will never be completed. In such a case we
4160 * need to resend whatever was the last sent
4161 * command.
4162 */
4163 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4164 hci_resend_last(hdev);
4165
Johan Hedberg9238f362013-03-05 20:37:48 +02004166 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004167 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004168
4169	/* If the command succeeded and there are still more commands in
4170	 * this request, the request is not yet complete.
4171 */
4172 if (!status && !hci_req_is_complete(hdev))
4173 return;
4174
4175	/* If this was the last command in a request, the complete
4176 * callback would be found in hdev->sent_cmd instead of the
4177 * command queue (hdev->cmd_q).
4178 */
4179 if (hdev->sent_cmd) {
4180 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004181
4182 if (req_complete) {
4183 /* We must set the complete callback to NULL to
4184 * avoid calling the callback more than once if
4185 * this function gets called again.
4186 */
4187 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4188
Johan Hedberg9238f362013-03-05 20:37:48 +02004189 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004190 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004191 }
4192
4193 /* Remove all pending commands belonging to this request */
4194 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4195 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4196 if (bt_cb(skb)->req.start) {
4197 __skb_queue_head(&hdev->cmd_q, skb);
4198 break;
4199 }
4200
4201 req_complete = bt_cb(skb)->req.complete;
4202 kfree_skb(skb);
4203 }
4204 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4205
4206call_complete:
4207 if (req_complete)
4208 req_complete(hdev, status);
4209}
4210
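/* RX worker: drain hdev->rx_q, mirroring traffic to the monitor and any
 * HCI sockets, and dispatch each frame to the event, ACL or SCO data
 * handler. Raw and user-channel devices bypass the stack entirely.
 */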
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004211static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004213 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 struct sk_buff *skb;
4215
4216 BT_DBG("%s", hdev->name);
4217
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004219 /* Send copy to monitor */
4220 hci_send_to_monitor(hdev, skb);
4221
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 if (atomic_read(&hdev->promisc)) {
4223 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004224 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 }
4226
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004227 if (test_bit(HCI_RAW, &hdev->flags) ||
4228 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 kfree_skb(skb);
4230 continue;
4231 }
4232
4233 if (test_bit(HCI_INIT, &hdev->flags)) {
4234			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004235 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 case HCI_ACLDATA_PKT:
4237 case HCI_SCODATA_PKT:
4238 kfree_skb(skb);
4239 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004240 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241 }
4242
4243 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004244 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004246 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 hci_event_packet(hdev, skb);
4248 break;
4249
4250 case HCI_ACLDATA_PKT:
4251 BT_DBG("%s ACL data packet", hdev->name);
4252 hci_acldata_packet(hdev, skb);
4253 break;
4254
4255 case HCI_SCODATA_PKT:
4256 BT_DBG("%s SCO data packet", hdev->name);
4257 hci_scodata_packet(hdev, skb);
4258 break;
4259
4260 default:
4261 kfree_skb(skb);
4262 break;
4263 }
4264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265}
4266
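/* Command worker: if the controller has a free command slot, send the
 * next queued command, keeping a clone in hdev->sent_cmd for the
 * completion path, and (re)arm the command timeout.
 */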
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004267static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004269 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270 struct sk_buff *skb;
4271
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004272 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4273 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004276 if (atomic_read(&hdev->cmd_cnt)) {
4277 skb = skb_dequeue(&hdev->cmd_q);
4278 if (!skb)
4279 return;
4280
Wei Yongjun7585b972009-02-25 18:29:52 +08004281 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004283 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004284 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004286 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004287 if (test_bit(HCI_RESET, &hdev->flags))
4288 del_timer(&hdev->cmd_timer);
4289 else
4290 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004291 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292 } else {
4293 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004294 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 }
4296 }
4297}