/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
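
/* Note on the pattern shared by dut_mode and the other boolean debugfs
 * attributes in this file: reads report the flag as "Y" or "N", writes
 * are parsed with strtobool(), and the dev_flags bit is only toggled
 * once the controller has acknowledged the synchronous HCI command.
 * From user space this looks roughly like (a sketch, assuming debugfs
 * is mounted at /sys/kernel/debug and the controller is hci0):
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */
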
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
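
/* All read-only entries below follow the same seq_file idiom as
 * features_fops: single_open() binds the show callback to the hci_dev
 * stashed in inode->i_private by debugfs_create_file(), while
 * seq_read, seq_lseek and single_release provide the remaining file
 * semantics.
 */
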
static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
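
/* Byte-order illustration for uuids_show() with a hypothetical stored
 * value: the bytes fb 34 9b 5f 80 00 00 80 00 10 00 00 0b 11 00 00,
 * once run through the reversal loop, are printed by %pUb as the
 * familiar 0000110b-0000-1000-8000-00805f9b34fb.
 */
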
static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open           = simple_open,
        .read           = use_debug_keys_read,
        .llseek         = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
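
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations glue for a
 * single u64 value: reads invoke the getter and format the result with
 * the given printf format ("0x%4.4llx\n" here), writes parse a number
 * and hand it to the setter (NULL above, making voice_setting
 * read-only). The integer attributes that follow all use this pattern.
 */
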
static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
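
/* Units for the attributes above: idle_timeout is in milliseconds
 * (hence the 500..3600000 range check), while the sniff interval
 * values are kept in baseband slots of 0.625 ms; the specification
 * only allows even values, which is what the "val % 2" checks enforce.
 */
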
static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        /* Walk the long-term key list, not the BR/EDR link key list. */
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
582
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700583static int conn_min_interval_set(void *data, u64 val)
584{
585 struct hci_dev *hdev = data;
586
587 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700591 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597static int conn_min_interval_get(void *data, u64 *val)
598{
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->le_conn_min_interval;
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609 conn_min_interval_set, "%llu\n");
610
611static int conn_max_interval_set(void *data, u64 val)
612{
613 struct hci_dev *hdev = data;
614
615 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700619 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700620 hci_dev_unlock(hdev);
621
622 return 0;
623}
624
625static int conn_max_interval_get(void *data, u64 *val)
626{
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->le_conn_max_interval;
631 hci_dev_unlock(hdev);
632
633 return 0;
634}
635
636DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 conn_max_interval_set, "%llu\n");
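
/* The LE connection interval values are in units of 1.25 ms; the
 * allowed range 0x0006-0x0c80 therefore corresponds to 7.5 ms through
 * 4 seconds, and the setters additionally keep min <= max.
 */
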
638
Jukka Rissanen89863102013-12-11 17:05:38 +0200639static ssize_t lowpan_read(struct file *file, char __user *user_buf,
640 size_t count, loff_t *ppos)
641{
642 struct hci_dev *hdev = file->private_data;
643 char buf[3];
644
645 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
646 buf[1] = '\n';
647 buf[2] = '\0';
648 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
649}
650
651static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
652 size_t count, loff_t *position)
653{
654 struct hci_dev *hdev = fp->private_data;
655 bool enable;
656 char buf[32];
657 size_t buf_size = min(count, (sizeof(buf)-1));
658
659 if (copy_from_user(buf, user_buffer, buf_size))
660 return -EFAULT;
661
662 buf[buf_size] = '\0';
663
664 if (strtobool(buf, &enable) < 0)
665 return -EINVAL;
666
667 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
668 return -EALREADY;
669
670 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
671
672 return count;
673}
674
675static const struct file_operations lowpan_debugfs_fops = {
676 .open = simple_open,
677 .read = lowpan_read,
678 .write = lowpan_write,
679 .llseek = default_llseek,
680};
681
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682/* ---- HCI requests ---- */
683
Johan Hedberg42c6b122013-03-05 20:37:49 +0200684static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200686 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687
688 if (hdev->req_status == HCI_REQ_PEND) {
689 hdev->req_result = result;
690 hdev->req_status = HCI_REQ_DONE;
691 wake_up_interruptible(&hdev->req_wait_q);
692 }
693}
694
695static void hci_req_cancel(struct hci_dev *hdev, int err)
696{
697 BT_DBG("%s err 0x%2.2x", hdev->name, err);
698
699 if (hdev->req_status == HCI_REQ_PEND) {
700 hdev->req_result = err;
701 hdev->req_status = HCI_REQ_CANCELED;
702 wake_up_interruptible(&hdev->req_wait_q);
703 }
704}
705
Fengguang Wu77a63e02013-04-20 16:24:31 +0300706static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
707 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300708{
709 struct hci_ev_cmd_complete *ev;
710 struct hci_event_hdr *hdr;
711 struct sk_buff *skb;
712
713 hci_dev_lock(hdev);
714
715 skb = hdev->recv_evt;
716 hdev->recv_evt = NULL;
717
718 hci_dev_unlock(hdev);
719
720 if (!skb)
721 return ERR_PTR(-ENODATA);
722
723 if (skb->len < sizeof(*hdr)) {
724 BT_ERR("Too short HCI event");
725 goto failed;
726 }
727
728 hdr = (void *) skb->data;
729 skb_pull(skb, HCI_EVENT_HDR_SIZE);
730
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300731 if (event) {
732 if (hdr->evt != event)
733 goto failed;
734 return skb;
735 }
736
Johan Hedberg75e84b72013-04-02 13:35:04 +0300737 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
738 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
739 goto failed;
740 }
741
742 if (skb->len < sizeof(*ev)) {
743 BT_ERR("Too short cmd_complete event");
744 goto failed;
745 }
746
747 ev = (void *) skb->data;
748 skb_pull(skb, sizeof(*ev));
749
750 if (opcode == __le16_to_cpu(ev->opcode))
751 return skb;
752
753 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
754 __le16_to_cpu(ev->opcode));
755
756failed:
757 kfree_skb(skb);
758 return ERR_PTR(-ENODATA);
759}
760
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300761struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300762 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300763{
764 DECLARE_WAITQUEUE(wait, current);
765 struct hci_request req;
766 int err = 0;
767
768 BT_DBG("%s", hdev->name);
769
770 hci_req_init(&req, hdev);
771
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300772 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300773
774 hdev->req_status = HCI_REQ_PEND;
775
776 err = hci_req_run(&req, hci_req_sync_complete);
777 if (err < 0)
778 return ERR_PTR(err);
779
780 add_wait_queue(&hdev->req_wait_q, &wait);
781 set_current_state(TASK_INTERRUPTIBLE);
782
783 schedule_timeout(timeout);
784
785 remove_wait_queue(&hdev->req_wait_q, &wait);
786
787 if (signal_pending(current))
788 return ERR_PTR(-EINTR);
789
790 switch (hdev->req_status) {
791 case HCI_REQ_DONE:
792 err = -bt_to_errno(hdev->req_result);
793 break;
794
795 case HCI_REQ_CANCELED:
796 err = -hdev->req_result;
797 break;
798
799 default:
800 err = -ETIMEDOUT;
801 break;
802 }
803
804 hdev->req_status = hdev->req_result = 0;
805
806 BT_DBG("%s end: err %d", hdev->name, err);
807
808 if (err < 0)
809 return ERR_PTR(err);
810
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300811 return hci_get_cmd_complete(hdev, opcode, event);
812}
813EXPORT_SYMBOL(__hci_cmd_sync_ev);
814
815struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300816 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300817{
818 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300819}
820EXPORT_SYMBOL(__hci_cmd_sync);
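
/* Caller-side sketch for the synchronous helpers (the opcode and the
 * shape of the reply are command-specific; callers in this file take
 * hci_req_lock() around the call):
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... parse the Command Complete parameters in skb->data ...
 *      kfree_skb(skb);
 */
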
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
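
/* Inquiry mode selection above: 0x02 requests Extended Inquiry Result
 * events, 0x01 Inquiry Result with RSSI, and 0x00 the standard format.
 * The manufacturer/revision checks whitelist controllers known to
 * handle RSSI inquiry results despite not advertising the LMP feature.
 */
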
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
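
/* The LE event mask 0x1f set above enables the first five LE meta
 * events: LE Connection Complete, LE Advertising Report, LE Connection
 * Update Complete, LE Read Remote Used Features Complete and LE Long
 * Term Key Request.
 */
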
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        /* If the controller has a public BD_ADDR, then
                         * by default use that one. If this is a LE only
                         * controller without a public address, default
                         * to the random address.
                         */
                        if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                                hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                        else
                                hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (lmp_sc_capable(hdev) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
                                    &lowpan_debugfs_fops);
        }

        return 0;
}
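
/* Controller bring-up is thus split into up to four synchronous request
 * stages: init1 (reset plus basic capability reads), init2
 * (transport-specific setup and the event mask), init3 (command- and
 * feature-conditional setup) and init4 (page-2 event mask and optional
 * features). AMP controllers stop after init1.
 */
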
1475
Johan Hedberg42c6b122013-03-05 20:37:49 +02001476static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477{
1478 __u8 scan = opt;
1479
Johan Hedberg42c6b122013-03-05 20:37:49 +02001480 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
1482 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001483 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484}
1485
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
1488 __u8 auth = opt;
1489
Johan Hedberg42c6b122013-03-05 20:37:49 +02001490 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491
1492 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001493 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494}
1495
Johan Hedberg42c6b122013-03-05 20:37:49 +02001496static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497{
1498 __u8 encrypt = opt;
1499
Johan Hedberg42c6b122013-03-05 20:37:49 +02001500 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001502 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001503 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504}
1505
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001507{
1508 __le16 policy = cpu_to_le16(opt);
1509
Johan Hedberg42c6b122013-03-05 20:37:49 +02001510 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001511
1512 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001514}
1515
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001516/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 * Device is held on return. */
1518struct hci_dev *hci_dev_get(int index)
1519{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001520 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
1522 BT_DBG("%d", index);
1523
1524 if (index < 0)
1525 return NULL;
1526
1527 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001528 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (d->id == index) {
1530 hdev = hci_dev_hold(d);
1531 break;
1532 }
1533 }
1534 read_unlock(&hci_dev_list_lock);
1535 return hdev;
1536}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
1538/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001539
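/* A discovery procedure is considered active while the discovery state
 * machine is in the FINDING or RESOLVING state.
 */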
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001540bool hci_discovery_active(struct hci_dev *hdev)
1541{
1542 struct discovery_state *discov = &hdev->discovery;
1543
Andre Guedes6fbe1952012-02-03 17:47:58 -03001544 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001545 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001546 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001547 return true;
1548
Andre Guedes6fbe1952012-02-03 17:47:58 -03001549 default:
1550 return false;
1551 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001552}
1553
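/* Update the discovery state machine and emit mgmt "discovering"
 * events on the transitions to STOPPED and FINDING.
 */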
Johan Hedbergff9ef572012-01-04 14:23:45 +02001554void hci_discovery_set_state(struct hci_dev *hdev, int state)
1555{
1556 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1557
1558 if (hdev->discovery.state == state)
1559 return;
1560
1561 switch (state) {
1562 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001563 if (hdev->discovery.state != DISCOVERY_STARTING)
1564 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001565 break;
1566 case DISCOVERY_STARTING:
1567 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001568 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001569 mgmt_discovering(hdev, 1);
1570 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001571 case DISCOVERY_RESOLVING:
1572 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001573 case DISCOVERY_STOPPING:
1574 break;
1575 }
1576
1577 hdev->discovery.state = state;
1578}
1579
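/* Drop all inquiry cache entries and reset the unknown and resolve
 * lists. Callers in this file hold hdev->lock around this.
 */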
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001580void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581{
Johan Hedberg30883512012-01-04 14:16:21 +02001582 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001583 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
Johan Hedberg561aafb2012-01-04 13:31:59 +02001585 list_for_each_entry_safe(p, n, &cache->all, all) {
1586 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001587 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001589
1590 INIT_LIST_HEAD(&cache->unknown);
1591 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592}
1593
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001594struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1595 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596{
Johan Hedberg30883512012-01-04 14:16:21 +02001597 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 struct inquiry_entry *e;
1599
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001600 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601
Johan Hedberg561aafb2012-01-04 13:31:59 +02001602 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001604 return e;
1605 }
1606
1607 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
1609
Johan Hedberg561aafb2012-01-04 13:31:59 +02001610struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001611 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001612{
Johan Hedberg30883512012-01-04 14:16:21 +02001613 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001614 struct inquiry_entry *e;
1615
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001616 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001617
1618 list_for_each_entry(e, &cache->unknown, list) {
1619 if (!bacmp(&e->data.bdaddr, bdaddr))
1620 return e;
1621 }
1622
1623 return NULL;
1624}
1625
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001626struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001627 bdaddr_t *bdaddr,
1628 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001629{
1630 struct discovery_state *cache = &hdev->discovery;
1631 struct inquiry_entry *e;
1632
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001633 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001634
1635 list_for_each_entry(e, &cache->resolve, list) {
1636 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1637 return e;
1638 if (!bacmp(&e->data.bdaddr, bdaddr))
1639 return e;
1640 }
1641
1642 return NULL;
1643}
1644
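/* Re-insert an entry into the resolve list, keeping the list ordered
 * by signal strength so that the device with the strongest RSSI gets
 * its name resolved first.
 */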
Johan Hedberga3d4e202012-01-09 00:53:02 +02001645void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001646 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001647{
1648 struct discovery_state *cache = &hdev->discovery;
1649 struct list_head *pos = &cache->resolve;
1650 struct inquiry_entry *p;
1651
1652 list_del(&ie->list);
1653
1654 list_for_each_entry(p, &cache->resolve, list) {
1655 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001656 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001657 break;
1658 pos = &p->list;
1659 }
1660
1661 list_add(&ie->list, pos);
1662}
1663
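/* Add or update an inquiry cache entry from freshly received inquiry
 * data. Returns true if the remote name is already known, false if a
 * remote name request is still needed (or the entry could not be
 * stored).
 */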
Johan Hedberg31754052012-01-04 13:39:52 +02001664bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001665 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666{
Johan Hedberg30883512012-01-04 14:16:21 +02001667 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001668 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001670 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
Szymon Janc2b2fec42012-11-20 11:38:54 +01001672 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1673
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001674 if (ssp)
1675 *ssp = data->ssp_mode;
1676
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001677 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001678 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001679 if (ie->data.ssp_mode && ssp)
1680 *ssp = true;
1681
Johan Hedberga3d4e202012-01-09 00:53:02 +02001682 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001683 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001684 ie->data.rssi = data->rssi;
1685 hci_inquiry_cache_update_resolve(hdev, ie);
1686 }
1687
Johan Hedberg561aafb2012-01-04 13:31:59 +02001688 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001689 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001690
Johan Hedberg561aafb2012-01-04 13:31:59 +02001691 /* Entry not in the cache. Add new one. */
1692 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1693 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001694 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001695
1696 list_add(&ie->all, &cache->all);
1697
1698 if (name_known) {
1699 ie->name_state = NAME_KNOWN;
1700 } else {
1701 ie->name_state = NAME_NOT_KNOWN;
1702 list_add(&ie->list, &cache->unknown);
1703 }
1704
1705update:
1706 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001707 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001708 ie->name_state = NAME_KNOWN;
1709 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 }
1711
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001712 memcpy(&ie->data, data, sizeof(*data));
1713 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001715
1716 if (ie->name_state == NAME_NOT_KNOWN)
1717 return false;
1718
1719 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720}
1721
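/* Copy up to num cached entries into buf as struct inquiry_info
 * records and return the number of entries copied.
 */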
1722static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1723{
Johan Hedberg30883512012-01-04 14:16:21 +02001724 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 struct inquiry_info *info = (struct inquiry_info *) buf;
1726 struct inquiry_entry *e;
1727 int copied = 0;
1728
Johan Hedberg561aafb2012-01-04 13:31:59 +02001729 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001731
1732 if (copied >= num)
1733 break;
1734
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 bacpy(&info->bdaddr, &data->bdaddr);
1736 info->pscan_rep_mode = data->pscan_rep_mode;
1737 info->pscan_period_mode = data->pscan_period_mode;
1738 info->pscan_mode = data->pscan_mode;
1739 memcpy(info->dev_class, data->dev_class, 3);
1740 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001741
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001743 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 }
1745
1746 BT_DBG("cache %p, copied %d", cache, copied);
1747 return copied;
1748}
1749
Johan Hedberg42c6b122013-03-05 20:37:49 +02001750static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751{
1752 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001753 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 struct hci_cp_inquiry cp;
1755
1756 BT_DBG("%s", hdev->name);
1757
1758 if (test_bit(HCI_INQUIRY, &hdev->flags))
1759 return;
1760
1761 /* Start Inquiry */
1762 memcpy(&cp.lap, &ir->lap, 3);
1763 cp.length = ir->length;
1764 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001765 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766}
1767
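/* Action function for wait_on_bit(): sleep until the HCI_INQUIRY bit
 * is cleared, aborting early if a signal is pending.
 */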
Andre Guedes3e13fa12013-03-27 20:04:56 -03001768static int wait_inquiry(void *word)
1769{
1770 schedule();
1771 return signal_pending(current);
1772}
1773
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774int hci_inquiry(void __user *arg)
1775{
1776 __u8 __user *ptr = arg;
1777 struct hci_inquiry_req ir;
1778 struct hci_dev *hdev;
1779 int err = 0, do_inquiry = 0, max_rsp;
1780 long timeo;
1781 __u8 *buf;
1782
1783 if (copy_from_user(&ir, ptr, sizeof(ir)))
1784 return -EFAULT;
1785
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001786 hdev = hci_dev_get(ir.dev_id);
1787 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 return -ENODEV;
1789
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001790 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1791 err = -EBUSY;
1792 goto done;
1793 }
1794
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001795 if (hdev->dev_type != HCI_BREDR) {
1796 err = -EOPNOTSUPP;
1797 goto done;
1798 }
1799
Johan Hedberg56f87902013-10-02 13:43:13 +03001800 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1801 err = -EOPNOTSUPP;
1802 goto done;
1803 }
1804
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001805 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001806 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001807 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001808 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 do_inquiry = 1;
1810 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001811 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
Marcel Holtmann04837f62006-07-03 10:02:33 +02001813 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001814
1815 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001816 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1817 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001818 if (err < 0)
1819 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001820
1821 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1822 * cleared). If it is interrupted by a signal, return -EINTR.
1823 */
1824 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1825 TASK_INTERRUPTIBLE))
1826 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001827 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001829 /* For an unlimited number of responses, use a buffer with
1830 * 255 entries.
1831 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1833
1834 /* cache_dump can't sleep, so allocate a temporary buffer and then
1835 * copy it to user space.
1836 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001837 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001838 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 err = -ENOMEM;
1840 goto done;
1841 }
1842
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001843 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001845 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846
1847 BT_DBG("num_rsp %d", ir.num_rsp);
1848
1849 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1850 ptr += sizeof(ir);
1851 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001852 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001854 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 err = -EFAULT;
1856
1857 kfree(buf);
1858
1859done:
1860 hci_dev_put(hdev);
1861 return err;
1862}
1863
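/* Common open path shared by the hci_dev_open() ioctl helper and the
 * power_on work: run the driver's open callback and the optional
 * vendor setup stage, initialize the controller, and tear everything
 * down again if the init sequence fails.
 */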
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001864static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 int ret = 0;
1867
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 BT_DBG("%s %p", hdev->name, hdev);
1869
1870 hci_req_lock(hdev);
1871
Johan Hovold94324962012-03-15 14:48:41 +01001872 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1873 ret = -ENODEV;
1874 goto done;
1875 }
1876
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001877 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1878 /* Check for rfkill but allow the HCI setup stage to
1879 * proceed (which in itself doesn't cause any RF activity).
1880 */
1881 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1882 ret = -ERFKILL;
1883 goto done;
1884 }
1885
1886 /* Check for valid public address or a configured static
1887 * random address, but let the HCI setup proceed to
1888 * be able to determine if there is a public address
1889 * or not.
1890 *
1891 * This check is only valid for BR/EDR controllers
1892 * since AMP controllers do not have an address.
1893 */
1894 if (hdev->dev_type == HCI_BREDR &&
1895 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1896 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1897 ret = -EADDRNOTAVAIL;
1898 goto done;
1899 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001900 }
1901
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 if (test_bit(HCI_UP, &hdev->flags)) {
1903 ret = -EALREADY;
1904 goto done;
1905 }
1906
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 if (hdev->open(hdev)) {
1908 ret = -EIO;
1909 goto done;
1910 }
1911
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001912 atomic_set(&hdev->cmd_cnt, 1);
1913 set_bit(HCI_INIT, &hdev->flags);
1914
1915 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1916 ret = hdev->setup(hdev);
1917
1918 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001919 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1920 set_bit(HCI_RAW, &hdev->flags);
1921
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001922 if (!test_bit(HCI_RAW, &hdev->flags) &&
1923 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001924 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 }
1926
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001927 clear_bit(HCI_INIT, &hdev->flags);
1928
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 if (!ret) {
1930 hci_dev_hold(hdev);
1931 set_bit(HCI_UP, &hdev->flags);
1932 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001933 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001934 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001935 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001936 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001937 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001938 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001939 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001940 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001942 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001943 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001944 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
1946 skb_queue_purge(&hdev->cmd_q);
1947 skb_queue_purge(&hdev->rx_q);
1948
1949 if (hdev->flush)
1950 hdev->flush(hdev);
1951
1952 if (hdev->sent_cmd) {
1953 kfree_skb(hdev->sent_cmd);
1954 hdev->sent_cmd = NULL;
1955 }
1956
1957 hdev->close(hdev);
1958 hdev->flags = 0;
1959 }
1960
1961done:
1962 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 return ret;
1964}
1965
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001966/* ---- HCI ioctl helpers ---- */
1967
1968int hci_dev_open(__u16 dev)
1969{
1970 struct hci_dev *hdev;
1971 int err;
1972
1973 hdev = hci_dev_get(dev);
1974 if (!hdev)
1975 return -ENODEV;
1976
Johan Hedberge1d08f42013-10-01 22:44:50 +03001977 /* We need to ensure that no other power on/off work is pending
1978 * before proceeding to call hci_dev_do_open. This is
1979 * particularly important if the setup procedure has not yet
1980 * completed.
1981 */
1982 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1983 cancel_delayed_work(&hdev->power_off);
1984
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001985 /* After this call it is guaranteed that the setup procedure
1986 * has finished. This means that error conditions like RFKILL
1987 * or no valid public or static random address apply.
1988 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001989 flush_workqueue(hdev->req_workqueue);
1990
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001991 err = hci_dev_do_open(hdev);
1992
1993 hci_dev_put(hdev);
1994
1995 return err;
1996}
1997
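/* Common close path: flush pending work, drain all queues, reset the
 * controller if the quirks require it, and finally call the driver's
 * close callback.
 */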
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998static int hci_dev_do_close(struct hci_dev *hdev)
1999{
2000 BT_DBG("%s %p", hdev->name, hdev);
2001
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002002 cancel_delayed_work(&hdev->power_off);
2003
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 hci_req_cancel(hdev, ENODEV);
2005 hci_req_lock(hdev);
2006
2007 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002008 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 hci_req_unlock(hdev);
2010 return 0;
2011 }
2012
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002013 /* Flush RX and TX works */
2014 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002015 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002017 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002018 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002019 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002020 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002021 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002022 }
2023
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002024 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002025 cancel_delayed_work(&hdev->service_cache);
2026
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002027 cancel_delayed_work_sync(&hdev->le_scan_disable);
2028
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002029 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002030 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002032 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034 hci_notify(hdev, HCI_DEV_DOWN);
2035
2036 if (hdev->flush)
2037 hdev->flush(hdev);
2038
2039 /* Reset device */
2040 skb_queue_purge(&hdev->cmd_q);
2041 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002042 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002043 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002044 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002046 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 clear_bit(HCI_INIT, &hdev->flags);
2048 }
2049
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002050 /* flush cmd work */
2051 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
2053 /* Drop queues */
2054 skb_queue_purge(&hdev->rx_q);
2055 skb_queue_purge(&hdev->cmd_q);
2056 skb_queue_purge(&hdev->raw_q);
2057
2058 /* Drop last sent command */
2059 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002060 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 kfree_skb(hdev->sent_cmd);
2062 hdev->sent_cmd = NULL;
2063 }
2064
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002065 kfree_skb(hdev->recv_evt);
2066 hdev->recv_evt = NULL;
2067
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 /* After this point our queues are empty
2069 * and no tasks are scheduled. */
2070 hdev->close(hdev);
2071
Johan Hedberg35b973c2013-03-15 17:06:59 -05002072 /* Clear flags */
2073 hdev->flags = 0;
2074 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2075
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002076 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2077 if (hdev->dev_type == HCI_BREDR) {
2078 hci_dev_lock(hdev);
2079 mgmt_powered(hdev, 0);
2080 hci_dev_unlock(hdev);
2081 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002082 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002083
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002084 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002085 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002086
Johan Hedberge59fda82012-02-22 18:11:53 +02002087 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002088 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002089
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 hci_req_unlock(hdev);
2091
2092 hci_dev_put(hdev);
2093 return 0;
2094}
2095
2096int hci_dev_close(__u16 dev)
2097{
2098 struct hci_dev *hdev;
2099 int err;
2100
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002101 hdev = hci_dev_get(dev);
2102 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002104
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002105 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2106 err = -EBUSY;
2107 goto done;
2108 }
2109
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002110 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2111 cancel_delayed_work(&hdev->power_off);
2112
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002114
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002115done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 hci_dev_put(hdev);
2117 return err;
2118}
2119
2120int hci_dev_reset(__u16 dev)
2121{
2122 struct hci_dev *hdev;
2123 int ret = 0;
2124
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002125 hdev = hci_dev_get(dev);
2126 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 return -ENODEV;
2128
2129 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Marcel Holtmann808a0492013-08-26 20:57:58 -07002131 if (!test_bit(HCI_UP, &hdev->flags)) {
2132 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002134 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002136 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2137 ret = -EBUSY;
2138 goto done;
2139 }
2140
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 /* Drop queues */
2142 skb_queue_purge(&hdev->rx_q);
2143 skb_queue_purge(&hdev->cmd_q);
2144
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002145 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002146 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002148 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150 if (hdev->flush)
2151 hdev->flush(hdev);
2152
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002153 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002154 hdev->acl_cnt = 0;
 hdev->sco_cnt = 0;
 hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002157 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
2159done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 hci_req_unlock(hdev);
2161 hci_dev_put(hdev);
2162 return ret;
2163}
2164
2165int hci_dev_reset_stat(__u16 dev)
2166{
2167 struct hci_dev *hdev;
2168 int ret = 0;
2169
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002170 hdev = hci_dev_get(dev);
2171 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 return -ENODEV;
2173
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002174 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2175 ret = -EBUSY;
2176 goto done;
2177 }
2178
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2180
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002181done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 return ret;
2184}
2185
2186int hci_dev_cmd(unsigned int cmd, void __user *arg)
2187{
2188 struct hci_dev *hdev;
2189 struct hci_dev_req dr;
2190 int err = 0;
2191
2192 if (copy_from_user(&dr, arg, sizeof(dr)))
2193 return -EFAULT;
2194
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002195 hdev = hci_dev_get(dr.dev_id);
2196 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 return -ENODEV;
2198
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002199 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2200 err = -EBUSY;
2201 goto done;
2202 }
2203
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002204 if (hdev->dev_type != HCI_BREDR) {
2205 err = -EOPNOTSUPP;
2206 goto done;
2207 }
2208
Johan Hedberg56f87902013-10-02 13:43:13 +03002209 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2210 err = -EOPNOTSUPP;
2211 goto done;
2212 }
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 switch (cmd) {
2215 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002216 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2217 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 break;
2219
2220 case HCISETENCRYPT:
2221 if (!lmp_encrypt_capable(hdev)) {
2222 err = -EOPNOTSUPP;
2223 break;
2224 }
2225
2226 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2227 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002228 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2229 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 if (err)
2231 break;
2232 }
2233
Johan Hedberg01178cd2013-03-05 20:37:41 +02002234 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2235 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 break;
2237
2238 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002239 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2240 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 break;
2242
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002243 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002244 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2245 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002246 break;
2247
2248 case HCISETLINKMODE:
2249 hdev->link_mode = ((__u16) dr.dev_opt) &
2250 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2251 break;
2252
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 case HCISETPTYPE:
2254 hdev->pkt_type = (__u16) dr.dev_opt;
2255 break;
2256
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002258 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2259 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 break;
2261
2262 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002263 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2264 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 break;
2266
2267 default:
2268 err = -EINVAL;
2269 break;
2270 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002271
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002272done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 hci_dev_put(hdev);
2274 return err;
2275}
2276
2277int hci_get_dev_list(void __user *arg)
2278{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002279 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 struct hci_dev_list_req *dl;
2281 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 int n = 0, size, err;
2283 __u16 dev_num;
2284
2285 if (get_user(dev_num, (__u16 __user *) arg))
2286 return -EFAULT;
2287
2288 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2289 return -EINVAL;
2290
2291 size = sizeof(*dl) + dev_num * sizeof(*dr);
2292
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002293 dl = kzalloc(size, GFP_KERNEL);
2294 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 return -ENOMEM;
2296
2297 dr = dl->dev_req;
2298
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002299 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002300 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002301 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002302 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002303
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002304 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2305 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002306
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 (dr + n)->dev_id = hdev->id;
2308 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002309
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 if (++n >= dev_num)
2311 break;
2312 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002313 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314
2315 dl->dev_num = n;
2316 size = sizeof(*dl) + n * sizeof(*dr);
2317
2318 err = copy_to_user(arg, dl, size);
2319 kfree(dl);
2320
2321 return err ? -EFAULT : 0;
2322}
2323
2324int hci_get_dev_info(void __user *arg)
2325{
2326 struct hci_dev *hdev;
2327 struct hci_dev_info di;
2328 int err = 0;
2329
2330 if (copy_from_user(&di, arg, sizeof(di)))
2331 return -EFAULT;
2332
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002333 hdev = hci_dev_get(di.dev_id);
2334 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 return -ENODEV;
2336
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002337 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002338 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002339
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002340 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2341 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 strcpy(di.name, hdev->name);
2344 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002345 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 di.flags = hdev->flags;
2347 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002348 if (lmp_bredr_capable(hdev)) {
2349 di.acl_mtu = hdev->acl_mtu;
2350 di.acl_pkts = hdev->acl_pkts;
2351 di.sco_mtu = hdev->sco_mtu;
2352 di.sco_pkts = hdev->sco_pkts;
2353 } else {
2354 di.acl_mtu = hdev->le_mtu;
2355 di.acl_pkts = hdev->le_pkts;
2356 di.sco_mtu = 0;
2357 di.sco_pkts = 0;
2358 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 di.link_policy = hdev->link_policy;
2360 di.link_mode = hdev->link_mode;
2361
2362 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2363 memcpy(&di.features, &hdev->features, sizeof(di.features));
2364
2365 if (copy_to_user(arg, &di, sizeof(di)))
2366 err = -EFAULT;
2367
2368 hci_dev_put(hdev);
2369
2370 return err;
2371}
2372
2373/* ---- Interface to HCI drivers ---- */
2374
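/* rfkill callback: when the switch blocks the radio, mark the device
 * as rfkilled and power it down unless it is still in the setup
 * stage.
 */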
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002375static int hci_rfkill_set_block(void *data, bool blocked)
2376{
2377 struct hci_dev *hdev = data;
2378
2379 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2380
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002381 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2382 return -EBUSY;
2383
Johan Hedberg5e130362013-09-13 08:58:17 +03002384 if (blocked) {
2385 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002386 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2387 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002388 } else {
2389 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002390 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002391
2392 return 0;
2393}
2394
2395static const struct rfkill_ops hci_rfkill_ops = {
2396 .set_block = hci_rfkill_set_block,
2397};
2398
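/* Deferred power-on work. Error conditions that are ignored during
 * the setup stage (rfkill, missing public or static address) are
 * re-checked here and, if still present, the device is powered back
 * off.
 */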
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002399static void hci_power_on(struct work_struct *work)
2400{
2401 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002402 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002403
2404 BT_DBG("%s", hdev->name);
2405
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002406 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002407 if (err < 0) {
2408 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002409 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002410 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002411
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002412 /* During the HCI setup phase, a few error conditions are
2413 * ignored and they need to be checked now. If they are still
2414 * valid, it is important to turn the device back off.
2415 */
2416 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2417 (hdev->dev_type == HCI_BREDR &&
2418 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2419 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002420 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2421 hci_dev_do_close(hdev);
2422 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002423 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2424 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002425 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002426
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002427 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002428 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002429}
2430
2431static void hci_power_off(struct work_struct *work)
2432{
Johan Hedberg32435532011-11-07 22:16:04 +02002433 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002434 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002435
2436 BT_DBG("%s", hdev->name);
2437
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002438 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002439}
2440
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002441static void hci_discov_off(struct work_struct *work)
2442{
2443 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002444
2445 hdev = container_of(work, struct hci_dev, discov_off.work);
2446
2447 BT_DBG("%s", hdev->name);
2448
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002449 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002450}
2451
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002452int hci_uuids_clear(struct hci_dev *hdev)
2453{
Johan Hedberg48210022013-01-27 00:31:28 +02002454 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002455
Johan Hedberg48210022013-01-27 00:31:28 +02002456 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2457 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002458 kfree(uuid);
2459 }
2460
2461 return 0;
2462}
2463
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002464int hci_link_keys_clear(struct hci_dev *hdev)
2465{
2466 struct list_head *p, *n;
2467
2468 list_for_each_safe(p, n, &hdev->link_keys) {
2469 struct link_key *key;
2470
2471 key = list_entry(p, struct link_key, list);
2472
2473 list_del(p);
2474 kfree(key);
2475 }
2476
2477 return 0;
2478}
2479
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002480int hci_smp_ltks_clear(struct hci_dev *hdev)
2481{
2482 struct smp_ltk *k, *tmp;
2483
2484 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2485 list_del(&k->list);
2486 kfree(k);
2487 }
2488
2489 return 0;
2490}
2491
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002492struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2493{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002494 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002495
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002496 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002497 if (bacmp(bdaddr, &k->bdaddr) == 0)
2498 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002499
2500 return NULL;
2501}
2502
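/* Decide whether a new link key should be stored persistently, based
 * on its type and on the bonding requirements of both sides of the
 * connection.
 */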
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302503static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002504 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002505{
2506 /* Legacy key */
2507 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302508 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002509
2510 /* Debug keys are insecure so don't store them persistently */
2511 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302512 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002513
2514 /* Changed combination key and there's no previous one */
2515 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302516 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002517
2518 /* Security mode 3 case */
2519 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302520 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002521
2522 /* Neither local nor remote side had no-bonding as requirement */
2523 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302524 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002525
2526 /* Local side had dedicated bonding as requirement */
2527 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302528 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002529
2530 /* Remote side had dedicated bonding as requirement */
2531 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302532 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002533
2534 /* If none of the above criteria match, then don't store the key
2535 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302536 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002537}
2538
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002539struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002540{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002541 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002542
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002543 list_for_each_entry(k, &hdev->long_term_keys, list) {
2544 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002545 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002546 continue;
2547
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002548 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002549 }
2550
2551 return NULL;
2552}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002553
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002554struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002555 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002556{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002557 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002558
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002559 list_for_each_entry(k, &hdev->long_term_keys, list)
2560 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002561 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002562 return k;
2563
2564 return NULL;
2565}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002566
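/* Store a BR/EDR link key, reusing any existing entry for the same
 * address, and notify the mgmt layer when a new key is distributed.
 */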
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002567int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002568 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002569{
2570 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302571 u8 old_key_type;
2572 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002573
2574 old_key = hci_find_link_key(hdev, bdaddr);
2575 if (old_key) {
2576 old_key_type = old_key->type;
2577 key = old_key;
2578 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002579 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002580 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2581 if (!key)
2582 return -ENOMEM;
2583 list_add(&key->list, &hdev->link_keys);
2584 }
2585
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002586 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002587
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002588 /* Some buggy controller combinations generate a changed
2589 * combination key for legacy pairing even when there's no
2590 * previous key */
2591 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002592 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002593 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002594 if (conn)
2595 conn->key_type = type;
2596 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002597
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002598 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002599 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002600 key->pin_len = pin_len;
2601
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002602 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002603 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002604 else
2605 key->type = type;
2606
Johan Hedberg4df378a2011-04-28 11:29:03 -07002607 if (!new_key)
2608 return 0;
2609
2610 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2611
Johan Hedberg744cf192011-11-08 20:40:14 +02002612 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002613
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302614 if (conn)
2615 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002616
2617 return 0;
2618}
2619
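/* Store an SMP short term or long term key for an LE device, reusing
 * any existing entry with the same address and address type.
 */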
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002620int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002621 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002622 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002623{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002624 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002625
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002626 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2627 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002628
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002629 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2630 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002631 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002632 else {
2633 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002634 if (!key)
2635 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002636 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002637 }
2638
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002639 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002640 key->bdaddr_type = addr_type;
2641 memcpy(key->val, tk, sizeof(key->val));
2642 key->authenticated = authenticated;
2643 key->ediv = ediv;
2644 key->enc_size = enc_size;
2645 key->type = type;
2646 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002647
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002648 if (!new_key)
2649 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002650
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002651 if (type & HCI_SMP_LTK)
2652 mgmt_new_ltk(hdev, key, 1);
2653
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002654 return 0;
2655}
2656
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002657int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2658{
2659 struct link_key *key;
2660
2661 key = hci_find_link_key(hdev, bdaddr);
2662 if (!key)
2663 return -ENOENT;
2664
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002665 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002666
2667 list_del(&key->list);
2668 kfree(key);
2669
2670 return 0;
2671}
2672
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002673int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2674{
2675 struct smp_ltk *k, *tmp;
2676
2677 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2678 if (bacmp(bdaddr, &k->bdaddr))
2679 continue;
2680
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002681 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002682
2683 list_del(&k->list);
2684 kfree(k);
2685 }
2686
2687 return 0;
2688}
2689
Ville Tervo6bd32322011-02-16 16:32:41 +02002690/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002691static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002692{
2693 struct hci_dev *hdev = (void *) arg;
2694
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002695 if (hdev->sent_cmd) {
2696 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2697 u16 opcode = __le16_to_cpu(sent->opcode);
2698
2699 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2700 } else {
2701 BT_ERR("%s command tx timeout", hdev->name);
2702 }
2703
Ville Tervo6bd32322011-02-16 16:32:41 +02002704 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002705 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002706}
2707
Szymon Janc2763eda2011-03-22 13:12:22 +01002708struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002709 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002710{
2711 struct oob_data *data;
2712
2713 list_for_each_entry(data, &hdev->remote_oob_data, list)
2714 if (bacmp(bdaddr, &data->bdaddr) == 0)
2715 return data;
2716
2717 return NULL;
2718}
2719
2720int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2721{
2722 struct oob_data *data;
2723
2724 data = hci_find_remote_oob_data(hdev, bdaddr);
2725 if (!data)
2726 return -ENOENT;
2727
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002728 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002729
2730 list_del(&data->list);
2731 kfree(data);
2732
2733 return 0;
2734}
2735
2736int hci_remote_oob_data_clear(struct hci_dev *hdev)
2737{
2738 struct oob_data *data, *n;
2739
2740 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2741 list_del(&data->list);
2742 kfree(data);
2743 }
2744
2745 return 0;
2746}
2747
2748int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002749 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002750{
2751 struct oob_data *data;
2752
2753 data = hci_find_remote_oob_data(hdev, bdaddr);
2754
2755 if (!data) {
2756 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2757 if (!data)
2758 return -ENOMEM;
2759
2760 bacpy(&data->bdaddr, bdaddr);
2761 list_add(&data->list, &hdev->remote_oob_data);
2762 }
2763
2764 memcpy(data->hash, hash, sizeof(data->hash));
2765 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2766
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002767 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002768
2769 return 0;
2770}
2771
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002772struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2773 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002774{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002775 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002776
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002777 list_for_each_entry(b, &hdev->blacklist, list) {
2778 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002779 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002780 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002781
2782 return NULL;
2783}
2784
2785int hci_blacklist_clear(struct hci_dev *hdev)
2786{
2787 struct list_head *p, *n;
2788
2789 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002790 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002791
2792 list_del(p);
2793 kfree(b);
2794 }
2795
2796 return 0;
2797}
2798
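/* Add a device to the blacklist. BDADDR_ANY and duplicate entries are
 * rejected; the mgmt layer is notified on success.
 */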
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

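/* A minimal usage sketch (hypothetical caller, not part of this file):
 * management code serializes blacklist updates with the device lock,
 * so a block-device handler would look roughly like this.
 */
#if 0
static int example_block_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}
#endif
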
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

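/* Sketch of the expected driver-side lifecycle (hypothetical driver
 * code, shown for illustration only): allocate, fill in the mandatory
 * callbacks, register, and pair hci_unregister_dev() with
 * hci_free_dev() on removal.
 */
#if 0
static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;	/* hypothetical callbacks */
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif
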
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

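/* Reassemble a partially received HCI packet. One sk_buff per
 * reassembly slot is kept in hdev->reassembly[]; bytes are appended
 * until the header-announced length is complete, at which point the
 * frame is handed to hci_recv_frame(). Returns the number of bytes
 * left unconsumed from @data, or a negative error.
 */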
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

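/* Feed one driver-provided fragment of a known packet type into the
 * reassembly machinery. The reassembly slot is derived from the packet
 * type (type - 1), so only ACL, SCO and event packets are accepted.
 */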
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

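/* A minimal sketch of how a UART-style driver might feed raw bytes into
 * the stream reassembler (hypothetical driver code, not from this file;
 * the packet type is recovered from the first byte of each frame).
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}
#endif
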
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

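/* Asynchronous HCI requests: commands are collected on a private queue
 * via hci_req_add() and submitted as one batch by hci_req_run(). The
 * completion callback attached to the last command fires once the
 * whole request has finished.
 */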
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

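/* A minimal sketch of the request API, assuming a callback with the
 * hci_req_complete_t signature (hypothetical example, mirroring the
 * LE scan disable work above).
 */
#if 0
static void example_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("Request failed: status %d", status);
}

static int example_send_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_complete);
}
#endif
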
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

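/* Channel-level scheduler: pick the channel with the highest queued
 * priority and, within that priority, the connection that has sent the
 * least so far. The returned quote is the number of packets the caller
 * may send from that channel in this round.
 */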
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

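/* Anti-starvation pass: after a scheduling round, bump the priority of
 * queued traffic on channels that did not get to send, so low-priority
 * channels are not starved indefinitely.
 */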
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback is found in hdev->sent_cmd instead of the command
	 * queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

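/* Sketch (editor's addition): how callers are expected to build the
 * requests that hci_req_cmd_complete() finishes, using the
 * request-building helpers from this file (signatures assumed from
 * the same kernel version); the opcodes and the callback below are
 * only for illustration.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int send_example_request(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);

	/* req.start is set on the first queued command, marking the
	 * request boundary that hci_req_is_complete() looks for.
	 */
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* The complete callback is attached to the last command and
	 * ends up in bt_cb(skb)->req.complete, where
	 * hci_req_cmd_complete() finds and invokes it.
	 */
	return hci_req_run(&req, example_req_complete);
}
#endif
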
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

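/* Sketch (editor's addition): how a transport driver typically feeds
 * this work item. The driver tags the skb with its packet type and
 * hands it to hci_recv_frame(), which queues it on hdev->rx_q and
 * schedules hci_rx_work(). The function and its parameters below are
 * hypothetical.
 */
#if 0
static int example_driver_rx(struct hci_dev *hdev, const void *data,
			     size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
#endif
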
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
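
/* Editor's note: hdev->cmd_cnt acts as the HCI command flow-control
 * credit: it is decremented here for each command handed to the
 * driver and replenished from the event path (Command Complete and
 * Command Status events carry a Num_HCI_Command_Packets count), which
 * then re-queues this work item. The cmd_timer armed above catches
 * controllers that stop responding within HCI_CMD_TIMEOUT.
 */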