/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

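/* The entries below are created under each controller's debugfs
 * directory, typically /sys/kernel/debug/bluetooth/hciX (assuming
 * debugfs is mounted in its usual location). The boolean attributes
 * use simple_open() with plain read/write handlers, so for example
 * "echo Y > .../hci0/dut_mode" puts an UP controller into Device
 * Under Test mode and "echo N" resets it back out.
 */
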
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

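/* The read-only entries below all follow the same seq_file idiom:
 * a *_show() callback prints the state under hci_dev_lock(),
 * single_open() binds it to the file, and seq_read()/seq_lseek()/
 * single_release() are reused unchanged for every attribute.
 */
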
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

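/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a
 * 64-bit debugfs attribute from a get/set callback pair and a printf
 * format. Passing NULL for the set callback, as done here for
 * voice_setting, makes the attribute read-only.
 */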
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

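/* Sniff interval values are expressed in baseband slots (0.625 ms
 * each) and must be even, hence the val % 2 check; the cross-checks
 * against the opposite bound keep min <= max at all times.
 */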
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

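/* A synchronous HCI request queues one or more commands with
 * hci_req_add(), kicks them off via hci_req_run() and then sleeps
 * on hdev->req_wait_q. hci_req_sync_complete() (or hci_req_cancel())
 * stores the outcome in hdev->req_result, moves hdev->req_status
 * away from HCI_REQ_PEND and wakes the waiter.
 */
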
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

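/* Consume the last received event (hdev->recv_evt) and validate it:
 * it must either match the explicitly requested event code or be a
 * Command Complete event carrying the expected opcode. On any
 * mismatch the skb is freed and -ENODATA returned.
 */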
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

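/* Locked wrapper around __hci_req_sync(): takes hci_req_lock() so
 * that requests against the same controller are serialized. Callers
 * of the unlocked variant above are expected to hold the request
 * lock already, as the setup path does.
 */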
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

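/* The HCI event mask is a 64-bit little-endian bitfield where bit
 * (event code - 1) enables the event with that code. For example,
 * Inquiry Result with RSSI (0x22) is bit 33, i.e. 0x02 in octet 4,
 * matching the events[4] |= 0x02 assignment below.
 */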
Johan Hedberg42c6b122013-03-05 20:37:49 +02001298static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001299{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001300 struct hci_dev *hdev = req->hdev;
1301
Johan Hedberg2177bab2013-03-05 20:37:43 +02001302 /* The second byte is 0xff instead of 0x9f (two reserved bits
1303 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1304 * command otherwise.
1305 */
1306 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1307
1308 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1309 * any event mask for pre 1.2 devices.
1310 */
1311 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1312 return;
1313
1314 if (lmp_bredr_capable(hdev)) {
1315 events[4] |= 0x01; /* Flow Specification Complete */
1316 events[4] |= 0x02; /* Inquiry Result with RSSI */
1317 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1318 events[5] |= 0x08; /* Synchronous Connection Complete */
1319 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001320 } else {
1321 /* Use a different default for LE-only devices */
1322 memset(events, 0, sizeof(events));
1323 events[0] |= 0x10; /* Disconnection Complete */
1324 events[0] |= 0x80; /* Encryption Change */
1325 events[1] |= 0x08; /* Read Remote Version Information Complete */
1326 events[1] |= 0x20; /* Command Complete */
1327 events[1] |= 0x40; /* Command Status */
1328 events[1] |= 0x80; /* Hardware Error */
1329 events[2] |= 0x04; /* Number of Completed Packets */
1330 events[3] |= 0x02; /* Data Buffer Overflow */
1331 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001332 }
1333
1334 if (lmp_inq_rssi_capable(hdev))
1335 events[4] |= 0x02; /* Inquiry Result with RSSI */
1336
1337 if (lmp_sniffsubr_capable(hdev))
1338 events[5] |= 0x20; /* Sniff Subrating */
1339
1340 if (lmp_pause_enc_capable(hdev))
1341 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1342
1343 if (lmp_ext_inq_capable(hdev))
1344 events[5] |= 0x40; /* Extended Inquiry Result */
1345
1346 if (lmp_no_flush_capable(hdev))
1347 events[7] |= 0x01; /* Enhanced Flush Complete */
1348
1349 if (lmp_lsto_capable(hdev))
1350 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1351
1352 if (lmp_ssp_capable(hdev)) {
1353 events[6] |= 0x01; /* IO Capability Request */
1354 events[6] |= 0x02; /* IO Capability Response */
1355 events[6] |= 0x04; /* User Confirmation Request */
1356 events[6] |= 0x08; /* User Passkey Request */
1357 events[6] |= 0x10; /* Remote OOB Data Request */
1358 events[6] |= 0x20; /* Simple Pairing Complete */
1359 events[7] |= 0x04; /* User Passkey Notification */
1360 events[7] |= 0x08; /* Keypress Notification */
1361 events[7] |= 0x10; /* Remote Host Supported
1362 * Features Notification
1363 */
1364 }
1365
1366 if (lmp_le_capable(hdev))
1367 events[7] |= 0x20; /* LE Meta-Event */
1368
Johan Hedberg42c6b122013-03-05 20:37:49 +02001369 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001370
1371 if (lmp_le_capable(hdev)) {
1372 memset(events, 0, sizeof(events));
1373 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001374 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1375 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001376 }
1377}
1378
Johan Hedberg42c6b122013-03-05 20:37:49 +02001379static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001380{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001381 struct hci_dev *hdev = req->hdev;
1382
Johan Hedberg2177bab2013-03-05 20:37:43 +02001383 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001384 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001385 else
1386 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001387
1388 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001389 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001390
Johan Hedberg42c6b122013-03-05 20:37:49 +02001391 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001392
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001393 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1394 * local supported commands HCI command.
1395 */
1396 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001397 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001398
1399 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001400 /* When SSP is available, then the host features page
1401 * should also be available as well. However some
1402 * controllers list the max_page as 0 as long as SSP
1403 * has not been enabled. To achieve proper debugging
1404 * output, force the minimum max_page to 1 at least.
1405 */
1406 hdev->max_page = 0x01;
1407
Johan Hedberg2177bab2013-03-05 20:37:43 +02001408 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1409 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001410 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1411 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001412 } else {
1413 struct hci_cp_write_eir cp;
1414
1415 memset(hdev->eir, 0, sizeof(hdev->eir));
1416 memset(&cp, 0, sizeof(cp));
1417
Johan Hedberg42c6b122013-03-05 20:37:49 +02001418 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001419 }
1420 }
1421
1422 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001423 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001424
1425 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001426 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001427
1428 if (lmp_ext_feat_capable(hdev)) {
1429 struct hci_cp_read_local_ext_features cp;
1430
1431 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001432 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1433 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001434 }
1435
1436 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1437 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001438 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1439 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001440 }
1441}
1442
Johan Hedberg42c6b122013-03-05 20:37:49 +02001443static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001444{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001445 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001446 struct hci_cp_write_def_link_policy cp;
1447 u16 link_policy = 0;
1448
1449 if (lmp_rswitch_capable(hdev))
1450 link_policy |= HCI_LP_RSWITCH;
1451 if (lmp_hold_capable(hdev))
1452 link_policy |= HCI_LP_HOLD;
1453 if (lmp_sniff_capable(hdev))
1454 link_policy |= HCI_LP_SNIFF;
1455 if (lmp_park_capable(hdev))
1456 link_policy |= HCI_LP_PARK;
1457
1458 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001459 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001460}
1461
Johan Hedberg42c6b122013-03-05 20:37:49 +02001462static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001463{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001464 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001465 struct hci_cp_write_le_host_supported cp;
1466
Johan Hedbergc73eee92013-04-19 18:35:21 +03001467 /* LE-only devices do not support explicit enablement */
1468 if (!lmp_bredr_capable(hdev))
1469 return;
1470
Johan Hedberg2177bab2013-03-05 20:37:43 +02001471 memset(&cp, 0, sizeof(cp));
1472
1473 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1474 cp.le = 0x01;
1475 cp.simul = lmp_le_br_capable(hdev);
1476 }
1477
1478 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001479 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1480 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001481}
1482
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001483static void hci_set_event_mask_page_2(struct hci_request *req)
1484{
1485 struct hci_dev *hdev = req->hdev;
1486 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1487
1488 /* If Connectionless Slave Broadcast master role is supported
1489 * enable all necessary events for it.
1490 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001491 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001492 events[1] |= 0x40; /* Triggered Clock Capture */
1493 events[1] |= 0x80; /* Synchronization Train Complete */
1494 events[2] |= 0x10; /* Slave Page Response Timeout */
1495 events[2] |= 0x20; /* CSB Channel Map Change */
1496 }
1497
1498 /* If Connectionless Slave Broadcast slave role is supported
1499 * enable all necessary events for it.
1500 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001501 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001502 events[2] |= 0x01; /* Synchronization Train Received */
1503 events[2] |= 0x02; /* CSB Receive */
1504 events[2] |= 0x04; /* CSB Timeout */
1505 events[2] |= 0x08; /* Truncated Page Complete */
1506 }
1507
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001508 /* Enable Authenticated Payload Timeout Expired event if supported */
1509 if (lmp_ping_capable(hdev))
1510 events[2] |= 0x80;
1511
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001512 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1513}
1514
Johan Hedberg42c6b122013-03-05 20:37:49 +02001515static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001516{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001517 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001518 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001519
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001520 /* Some Broadcom based Bluetooth controllers do not support the
1521 * Delete Stored Link Key command. They are clearly indicating its
1522 * absence in the bit mask of supported commands.
1523 *
1524 * Check the supported commands and only if the the command is marked
1525 * as supported send it. If not supported assume that the controller
1526 * does not have actual support for stored link keys which makes this
1527 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001528 *
1529 * Some controllers indicate that they support handling deleting
1530 * stored link keys, but they don't. The quirk lets a driver
1531 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001532 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001533 if (hdev->commands[6] & 0x80 &&
1534 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001535 struct hci_cp_delete_stored_link_key cp;
1536
1537 bacpy(&cp.bdaddr, BDADDR_ANY);
1538 cp.delete_all = 0x01;
1539 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1540 sizeof(cp), &cp);
1541 }
1542
Johan Hedberg2177bab2013-03-05 20:37:43 +02001543 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001544 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001545
Johan Hedberg7bf32042014-02-23 19:42:29 +02001546 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001547 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001548
1549 /* Read features beyond page 1 if available */
1550 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1551 struct hci_cp_read_local_ext_features cp;
1552
1553 cp.page = p;
1554 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1555 sizeof(cp), &cp);
1556 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001557}
1558
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001559static void hci_init4_req(struct hci_request *req, unsigned long opt)
1560{
1561 struct hci_dev *hdev = req->hdev;
1562
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001563 /* Set event mask page 2 if the HCI command for it is supported */
1564 if (hdev->commands[22] & 0x04)
1565 hci_set_event_mask_page_2(req);
1566
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001567 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001568 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001569 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001570
1571 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001572 if ((lmp_sc_capable(hdev) ||
1573 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001574 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1575 u8 support = 0x01;
1576 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1577 sizeof(support), &support);
1578 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001579}
1580
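/* Run the staged init sequence. All controllers go through stage one;
 * only BR/EDR/LE controllers continue with the later stages and the
 * debugfs entries, which are created during initial setup only.
 */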
Johan Hedberg2177bab2013-03-05 20:37:43 +02001581static int __hci_init(struct hci_dev *hdev)
1582{
1583 int err;
1584
1585 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1586 if (err < 0)
1587 return err;
1588
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001589 /* The Device Under Test (DUT) mode is special and available for
1590 * all controller types. So just create it early on.
1591 */
1592 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1593 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1594 &dut_mode_fops);
1595 }
1596
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1598 * dual-mode BR/EDR/LE controllers. AMP controllers only need
1599 * the first stage init.
1600 */
1601 if (hdev->dev_type != HCI_BREDR)
1602 return 0;
1603
1604 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1605 if (err < 0)
1606 return err;
1607
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001608 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1609 if (err < 0)
1610 return err;
1611
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001612 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1613 if (err < 0)
1614 return err;
1615
1616 /* Only create debugfs entries during the initial setup
1617 * phase and not every time the controller gets powered on.
1618 */
1619 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1620 return 0;
1621
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001622 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1623 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001624 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1625 &hdev->manufacturer);
1626 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1627 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001628 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1629 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001630 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1631
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001632 if (lmp_bredr_capable(hdev)) {
1633 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1634 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001635 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1636 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001637 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1638 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001639 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1640 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001641 }
1642
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001643 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001644 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1645 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001646 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1647 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001648 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1649 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001650 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1651 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001652 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001653
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001654 if (lmp_sniff_capable(hdev)) {
1655 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1656 hdev, &idle_timeout_fops);
1657 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1658 hdev, &sniff_min_interval_fops);
1659 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1660 hdev, &sniff_max_interval_fops);
1661 }
1662
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001663 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001664 debugfs_create_file("identity", 0400, hdev->debugfs,
1665 hdev, &identity_fops);
1666 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1667 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001668 debugfs_create_file("random_address", 0444, hdev->debugfs,
1669 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001670 debugfs_create_file("static_address", 0444, hdev->debugfs,
1671 hdev, &static_address_fops);
1672
1673 /* For controllers with a public address, provide a debug
1674 * option to force the usage of the configured static
1675 * address. By default the public address is used.
1676 */
1677 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1678 debugfs_create_file("force_static_address", 0644,
1679 hdev->debugfs, hdev,
1680 &force_static_address_fops);
1681
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001682 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1683 &hdev->le_white_list_size);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001684 debugfs_create_file("identity_resolving_keys", 0400,
1685 hdev->debugfs, hdev,
1686 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001687 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1688 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001689 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1690 hdev, &conn_min_interval_fops);
1691 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1692 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001693 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1694 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001695 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1696 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001697 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001698
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001699 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001700}
1701
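/* Request builders for the HCISETSCAN, HCISETAUTH, HCISETENCRYPT and
 * HCISETLINKPOL ioctls handled by hci_dev_cmd() below.
 */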
Johan Hedberg42c6b122013-03-05 20:37:49 +02001702static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703{
1704 __u8 scan = opt;
1705
Johan Hedberg42c6b122013-03-05 20:37:49 +02001706 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
1708 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001709 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710}
1711
Johan Hedberg42c6b122013-03-05 20:37:49 +02001712static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
1714 __u8 auth = opt;
1715
Johan Hedberg42c6b122013-03-05 20:37:49 +02001716 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001719 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720}
1721
Johan Hedberg42c6b122013-03-05 20:37:49 +02001722static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723{
1724 __u8 encrypt = opt;
1725
Johan Hedberg42c6b122013-03-05 20:37:49 +02001726 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001728 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001729 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730}
1731
Johan Hedberg42c6b122013-03-05 20:37:49 +02001732static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001733{
1734 __le16 policy = cpu_to_le16(opt);
1735
Johan Hedberg42c6b122013-03-05 20:37:49 +02001736 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001737
1738 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001739 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001740}
1741
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001742/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 * Device is held on return. */
1744struct hci_dev *hci_dev_get(int index)
1745{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001746 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
1748 BT_DBG("%d", index);
1749
1750 if (index < 0)
1751 return NULL;
1752
1753 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001754 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 if (d->id == index) {
1756 hdev = hci_dev_hold(d);
1757 break;
1758 }
1759 }
1760 read_unlock(&hci_dev_list_lock);
1761 return hdev;
1762}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
1764/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001765
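/* Discovery counts as active while new devices are being found or the
 * names of already found devices are being resolved.
 */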
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001766bool hci_discovery_active(struct hci_dev *hdev)
1767{
1768 struct discovery_state *discov = &hdev->discovery;
1769
Andre Guedes6fbe1952012-02-03 17:47:58 -03001770 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001771 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001772 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001773 return true;
1774
Andre Guedes6fbe1952012-02-03 17:47:58 -03001775 default:
1776 return false;
1777 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001778}
1779
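/* Advance the discovery state machine and signal mgmt when discovery
 * effectively starts or stops.
 */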
Johan Hedbergff9ef572012-01-04 14:23:45 +02001780void hci_discovery_set_state(struct hci_dev *hdev, int state)
1781{
1782 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1783
1784 if (hdev->discovery.state == state)
1785 return;
1786
1787 switch (state) {
1788 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001789 if (hdev->discovery.state != DISCOVERY_STARTING)
1790 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001791 break;
1792 case DISCOVERY_STARTING:
1793 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001794 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001795 mgmt_discovering(hdev, 1);
1796 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001797 case DISCOVERY_RESOLVING:
1798 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001799 case DISCOVERY_STOPPING:
1800 break;
1801 }
1802
1803 hdev->discovery.state = state;
1804}
1805
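/* Drop all inquiry cache entries; callers hold hdev->lock while
 * flushing.
 */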
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001806void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807{
Johan Hedberg30883512012-01-04 14:16:21 +02001808 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001809 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
Johan Hedberg561aafb2012-01-04 13:31:59 +02001811 list_for_each_entry_safe(p, n, &cache->all, all) {
1812 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001813 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001815
1816 INIT_LIST_HEAD(&cache->unknown);
1817 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001820struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1821 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822{
Johan Hedberg30883512012-01-04 14:16:21 +02001823 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 struct inquiry_entry *e;
1825
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001826 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
Johan Hedberg561aafb2012-01-04 13:31:59 +02001828 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001830 return e;
1831 }
1832
1833 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834}
1835
Johan Hedberg561aafb2012-01-04 13:31:59 +02001836struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001837 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001838{
Johan Hedberg30883512012-01-04 14:16:21 +02001839 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001840 struct inquiry_entry *e;
1841
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001842 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001843
1844 list_for_each_entry(e, &cache->unknown, list) {
1845 if (!bacmp(&e->data.bdaddr, bdaddr))
1846 return e;
1847 }
1848
1849 return NULL;
1850}
1851
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001852struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001853 bdaddr_t *bdaddr,
1854 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001855{
1856 struct discovery_state *cache = &hdev->discovery;
1857 struct inquiry_entry *e;
1858
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001859 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001860
1861 list_for_each_entry(e, &cache->resolve, list) {
1862 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1863 return e;
1864 if (!bacmp(&e->data.bdaddr, bdaddr))
1865 return e;
1866 }
1867
1868 return NULL;
1869}
1870
Johan Hedberga3d4e202012-01-09 00:53:02 +02001871void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001872 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001873{
1874 struct discovery_state *cache = &hdev->discovery;
1875 struct list_head *pos = &cache->resolve;
1876 struct inquiry_entry *p;
1877
1878 list_del(&ie->list);
1879
1880 list_for_each_entry(p, &cache->resolve, list) {
1881 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001882 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001883 break;
1884 pos = &p->list;
1885 }
1886
1887 list_add(&ie->list, pos);
1888}
1889
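/* Add or refresh an inquiry cache entry. Returns true when the remote
 * name is already known, so no further name resolution is needed.
 */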
Johan Hedberg31754052012-01-04 13:39:52 +02001890bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001891 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892{
Johan Hedberg30883512012-01-04 14:16:21 +02001893 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001894 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001896 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Szymon Janc2b2fec42012-11-20 11:38:54 +01001898 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1899
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001900 if (ssp)
1901 *ssp = data->ssp_mode;
1902
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001903 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001904 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001905 if (ie->data.ssp_mode && ssp)
1906 *ssp = true;
1907
Johan Hedberga3d4e202012-01-09 00:53:02 +02001908 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001909 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001910 ie->data.rssi = data->rssi;
1911 hci_inquiry_cache_update_resolve(hdev, ie);
1912 }
1913
Johan Hedberg561aafb2012-01-04 13:31:59 +02001914 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001915 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001916
Johan Hedberg561aafb2012-01-04 13:31:59 +02001917 /* Entry not in the cache. Add a new one. */
1918 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1919 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001920 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001921
1922 list_add(&ie->all, &cache->all);
1923
1924 if (name_known) {
1925 ie->name_state = NAME_KNOWN;
1926 } else {
1927 ie->name_state = NAME_NOT_KNOWN;
1928 list_add(&ie->list, &cache->unknown);
1929 }
1930
1931update:
1932 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001933 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001934 ie->name_state = NAME_KNOWN;
1935 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936 }
1937
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001938 memcpy(&ie->data, data, sizeof(*data));
1939 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001941
1942 if (ie->name_state == NAME_NOT_KNOWN)
1943 return false;
1944
1945 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946}
1947
1948static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1949{
Johan Hedberg30883512012-01-04 14:16:21 +02001950 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 struct inquiry_info *info = (struct inquiry_info *) buf;
1952 struct inquiry_entry *e;
1953 int copied = 0;
1954
Johan Hedberg561aafb2012-01-04 13:31:59 +02001955 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001957
1958 if (copied >= num)
1959 break;
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 bacpy(&info->bdaddr, &data->bdaddr);
1962 info->pscan_rep_mode = data->pscan_rep_mode;
1963 info->pscan_period_mode = data->pscan_period_mode;
1964 info->pscan_mode = data->pscan_mode;
1965 memcpy(info->dev_class, data->dev_class, 3);
1966 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001969 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 }
1971
1972 BT_DBG("cache %p, copied %d", cache, copied);
1973 return copied;
1974}
1975
Johan Hedberg42c6b122013-03-05 20:37:49 +02001976static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977{
1978 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001979 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 struct hci_cp_inquiry cp;
1981
1982 BT_DBG("%s", hdev->name);
1983
1984 if (test_bit(HCI_INQUIRY, &hdev->flags))
1985 return;
1986
1987 /* Start Inquiry */
1988 memcpy(&cp.lap, &ir->lap, 3);
1989 cp.length = ir->length;
1990 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001991 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992}
1993
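/* wait_on_bit() action: reschedule and report whether a signal is
 * pending.
 */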
Andre Guedes3e13fa12013-03-27 20:04:56 -03001994static int wait_inquiry(void *word)
1995{
1996 schedule();
1997 return signal_pending(current);
1998}
1999
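/* HCIINQUIRY ioctl: run an inquiry when the cache is stale or a flush
 * is requested, then copy the cached results to user space.
 */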
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000int hci_inquiry(void __user *arg)
2001{
2002 __u8 __user *ptr = arg;
2003 struct hci_inquiry_req ir;
2004 struct hci_dev *hdev;
2005 int err = 0, do_inquiry = 0, max_rsp;
2006 long timeo;
2007 __u8 *buf;
2008
2009 if (copy_from_user(&ir, ptr, sizeof(ir)))
2010 return -EFAULT;
2011
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002012 hdev = hci_dev_get(ir.dev_id);
2013 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 return -ENODEV;
2015
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002016 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2017 err = -EBUSY;
2018 goto done;
2019 }
2020
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002021 if (hdev->dev_type != HCI_BREDR) {
2022 err = -EOPNOTSUPP;
2023 goto done;
2024 }
2025
Johan Hedberg56f87902013-10-02 13:43:13 +03002026 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2027 err = -EOPNOTSUPP;
2028 goto done;
2029 }
2030
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002031 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002032 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002033 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002034 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 do_inquiry = 1;
2036 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002037 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
Marcel Holtmann04837f62006-07-03 10:02:33 +02002039 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002040
2041 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002042 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2043 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002044 if (err < 0)
2045 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002046
2047 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2048 * cleared). If it is interrupted by a signal, return -EINTR.
2049 */
2050 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2051 TASK_INTERRUPTIBLE))
2052 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002055 /* For an unlimited number of responses we use a buffer with
2056 * 255 entries
2057 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2059
2060 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2061 * and then copy it to user space.
2062 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002063 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002064 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 err = -ENOMEM;
2066 goto done;
2067 }
2068
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002069 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002071 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
2073 BT_DBG("num_rsp %d", ir.num_rsp);
2074
2075 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2076 ptr += sizeof(ir);
2077 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002078 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002080 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 err = -EFAULT;
2082
2083 kfree(buf);
2084
2085done:
2086 hci_dev_put(hdev);
2087 return err;
2088}
2089
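/* Bring the controller up: run the driver setup and the staged init
 * sequence and, on success, mark the device as powered and notify the
 * stack.
 */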
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002090static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 int ret = 0;
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 BT_DBG("%s %p", hdev->name, hdev);
2095
2096 hci_req_lock(hdev);
2097
Johan Hovold94324962012-03-15 14:48:41 +01002098 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2099 ret = -ENODEV;
2100 goto done;
2101 }
2102
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002103 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2104 /* Check for rfkill but allow the HCI setup stage to
2105 * proceed (which in itself doesn't cause any RF activity).
2106 */
2107 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2108 ret = -ERFKILL;
2109 goto done;
2110 }
2111
2112 /* Check for valid public address or a configured static
2113 * random address, but let the HCI setup proceed to
2114 * be able to determine if there is a public address
2115 * or not.
2116 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002117 * In case of user channel usage, it is not important
2118 * if a public address or static random address is
2119 * available.
2120 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002121 * This check is only valid for BR/EDR controllers
2122 * since AMP controllers do not have an address.
2123 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002124 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2125 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002126 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2127 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2128 ret = -EADDRNOTAVAIL;
2129 goto done;
2130 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002131 }
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 if (test_bit(HCI_UP, &hdev->flags)) {
2134 ret = -EALREADY;
2135 goto done;
2136 }
2137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 if (hdev->open(hdev)) {
2139 ret = -EIO;
2140 goto done;
2141 }
2142
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002143 atomic_set(&hdev->cmd_cnt, 1);
2144 set_bit(HCI_INIT, &hdev->flags);
2145
2146 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2147 ret = hdev->setup(hdev);
2148
2149 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002150 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2151 set_bit(HCI_RAW, &hdev->flags);
2152
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002153 if (!test_bit(HCI_RAW, &hdev->flags) &&
2154 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002155 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 }
2157
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002158 clear_bit(HCI_INIT, &hdev->flags);
2159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 if (!ret) {
2161 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002162 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 set_bit(HCI_UP, &hdev->flags);
2164 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002165 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002166 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002167 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002168 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002169 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002170 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002171 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002172 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002174 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002175 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002176 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
2178 skb_queue_purge(&hdev->cmd_q);
2179 skb_queue_purge(&hdev->rx_q);
2180
2181 if (hdev->flush)
2182 hdev->flush(hdev);
2183
2184 if (hdev->sent_cmd) {
2185 kfree_skb(hdev->sent_cmd);
2186 hdev->sent_cmd = NULL;
2187 }
2188
2189 hdev->close(hdev);
2190 hdev->flags = 0;
2191 }
2192
2193done:
2194 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 return ret;
2196}
2197
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002198/* ---- HCI ioctl helpers ---- */
2199
2200int hci_dev_open(__u16 dev)
2201{
2202 struct hci_dev *hdev;
2203 int err;
2204
2205 hdev = hci_dev_get(dev);
2206 if (!hdev)
2207 return -ENODEV;
2208
Johan Hedberge1d08f42013-10-01 22:44:50 +03002209 /* We need to ensure that no other power on/off work is pending
2210 * before proceeding to call hci_dev_do_open. This is
2211 * particularly important if the setup procedure has not yet
2212 * completed.
2213 */
2214 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2215 cancel_delayed_work(&hdev->power_off);
2216
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002217 /* After this call it is guaranteed that the setup procedure
2218 * has finished. This means that error conditions like RFKILL
2219 * or no valid public or static random address apply.
2220 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002221 flush_workqueue(hdev->req_workqueue);
2222
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002223 err = hci_dev_do_open(hdev);
2224
2225 hci_dev_put(hdev);
2226
2227 return err;
2228}
2229
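/* Bring the controller down: flush pending work and queues, optionally
 * issue an HCI reset and notify the stack that the device is powered
 * off.
 */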
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230static int hci_dev_do_close(struct hci_dev *hdev)
2231{
2232 BT_DBG("%s %p", hdev->name, hdev);
2233
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002234 cancel_delayed_work(&hdev->power_off);
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 hci_req_cancel(hdev, ENODEV);
2237 hci_req_lock(hdev);
2238
2239 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002240 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 hci_req_unlock(hdev);
2242 return 0;
2243 }
2244
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002245 /* Flush RX and TX works */
2246 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002247 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002249 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002250 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002251 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002252 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002253 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002254 }
2255
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002256 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002257 cancel_delayed_work(&hdev->service_cache);
2258
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002259 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002260
2261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2262 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002263
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002264 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002265 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002267 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
2269 hci_notify(hdev, HCI_DEV_DOWN);
2270
2271 if (hdev->flush)
2272 hdev->flush(hdev);
2273
2274 /* Reset device */
2275 skb_queue_purge(&hdev->cmd_q);
2276 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002277 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002278 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002279 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002281 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 clear_bit(HCI_INIT, &hdev->flags);
2283 }
2284
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002285 /* flush cmd work */
2286 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287
2288 /* Drop queues */
2289 skb_queue_purge(&hdev->rx_q);
2290 skb_queue_purge(&hdev->cmd_q);
2291 skb_queue_purge(&hdev->raw_q);
2292
2293 /* Drop last sent command */
2294 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002295 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 kfree_skb(hdev->sent_cmd);
2297 hdev->sent_cmd = NULL;
2298 }
2299
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002300 kfree_skb(hdev->recv_evt);
2301 hdev->recv_evt = NULL;
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 /* After this point our queues are empty
2304 * and no tasks are scheduled. */
2305 hdev->close(hdev);
2306
Johan Hedberg35b973c2013-03-15 17:06:59 -05002307 /* Clear flags */
2308 hdev->flags = 0;
2309 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2310
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002311 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2312 if (hdev->dev_type == HCI_BREDR) {
2313 hci_dev_lock(hdev);
2314 mgmt_powered(hdev, 0);
2315 hci_dev_unlock(hdev);
2316 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002317 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002318
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002319 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002320 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002321
Johan Hedberge59fda82012-02-22 18:11:53 +02002322 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002323 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002324 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002325
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 hci_req_unlock(hdev);
2327
2328 hci_dev_put(hdev);
2329 return 0;
2330}
2331
2332int hci_dev_close(__u16 dev)
2333{
2334 struct hci_dev *hdev;
2335 int err;
2336
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002337 hdev = hci_dev_get(dev);
2338 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002340
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002341 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2342 err = -EBUSY;
2343 goto done;
2344 }
2345
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002346 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2347 cancel_delayed_work(&hdev->power_off);
2348
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002350
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002351done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 hci_dev_put(hdev);
2353 return err;
2354}
2355
2356int hci_dev_reset(__u16 dev)
2357{
2358 struct hci_dev *hdev;
2359 int ret = 0;
2360
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002361 hdev = hci_dev_get(dev);
2362 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 return -ENODEV;
2364
2365 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366
Marcel Holtmann808a0492013-08-26 20:57:58 -07002367 if (!test_bit(HCI_UP, &hdev->flags)) {
2368 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002370 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002372 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2373 ret = -EBUSY;
2374 goto done;
2375 }
2376
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 /* Drop queues */
2378 skb_queue_purge(&hdev->rx_q);
2379 skb_queue_purge(&hdev->cmd_q);
2380
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002381 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002382 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002384 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
2386 if (hdev->flush)
2387 hdev->flush(hdev);
2388
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002389 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002390 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
2392 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002393 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394
2395done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 hci_req_unlock(hdev);
2397 hci_dev_put(hdev);
2398 return ret;
2399}
2400
2401int hci_dev_reset_stat(__u16 dev)
2402{
2403 struct hci_dev *hdev;
2404 int ret = 0;
2405
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002406 hdev = hci_dev_get(dev);
2407 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 return -ENODEV;
2409
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002410 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2411 ret = -EBUSY;
2412 goto done;
2413 }
2414
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2416
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002417done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 return ret;
2420}
2421
2422int hci_dev_cmd(unsigned int cmd, void __user *arg)
2423{
2424 struct hci_dev *hdev;
2425 struct hci_dev_req dr;
2426 int err = 0;
2427
2428 if (copy_from_user(&dr, arg, sizeof(dr)))
2429 return -EFAULT;
2430
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002431 hdev = hci_dev_get(dr.dev_id);
2432 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 return -ENODEV;
2434
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002435 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2436 err = -EBUSY;
2437 goto done;
2438 }
2439
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002440 if (hdev->dev_type != HCI_BREDR) {
2441 err = -EOPNOTSUPP;
2442 goto done;
2443 }
2444
Johan Hedberg56f87902013-10-02 13:43:13 +03002445 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2446 err = -EOPNOTSUPP;
2447 goto done;
2448 }
2449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 switch (cmd) {
2451 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002452 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2453 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 break;
2455
2456 case HCISETENCRYPT:
2457 if (!lmp_encrypt_capable(hdev)) {
2458 err = -EOPNOTSUPP;
2459 break;
2460 }
2461
2462 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2463 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002464 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2465 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 if (err)
2467 break;
2468 }
2469
Johan Hedberg01178cd2013-03-05 20:37:41 +02002470 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2471 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 break;
2473
2474 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002475 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2476 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 break;
2478
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002479 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002480 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2481 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002482 break;
2483
2484 case HCISETLINKMODE:
2485 hdev->link_mode = ((__u16) dr.dev_opt) &
2486 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2487 break;
2488
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 case HCISETPTYPE:
2490 hdev->pkt_type = (__u16) dr.dev_opt;
2491 break;
2492
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002494 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2495 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 break;
2497
2498 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002499 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2500 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 break;
2502
2503 default:
2504 err = -EINVAL;
2505 break;
2506 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002507
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002508done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 hci_dev_put(hdev);
2510 return err;
2511}
2512
2513int hci_get_dev_list(void __user *arg)
2514{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002515 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 struct hci_dev_list_req *dl;
2517 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 int n = 0, size, err;
2519 __u16 dev_num;
2520
2521 if (get_user(dev_num, (__u16 __user *) arg))
2522 return -EFAULT;
2523
2524 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2525 return -EINVAL;
2526
2527 size = sizeof(*dl) + dev_num * sizeof(*dr);
2528
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002529 dl = kzalloc(size, GFP_KERNEL);
2530 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 return -ENOMEM;
2532
2533 dr = dl->dev_req;
2534
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002535 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002536 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002537 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002538 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002539
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002540 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2541 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002542
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 (dr + n)->dev_id = hdev->id;
2544 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002545
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 if (++n >= dev_num)
2547 break;
2548 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002549 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
2551 dl->dev_num = n;
2552 size = sizeof(*dl) + n * sizeof(*dr);
2553
2554 err = copy_to_user(arg, dl, size);
2555 kfree(dl);
2556
2557 return err ? -EFAULT : 0;
2558}
2559
2560int hci_get_dev_info(void __user *arg)
2561{
2562 struct hci_dev *hdev;
2563 struct hci_dev_info di;
2564 int err = 0;
2565
2566 if (copy_from_user(&di, arg, sizeof(di)))
2567 return -EFAULT;
2568
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002569 hdev = hci_dev_get(di.dev_id);
2570 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 return -ENODEV;
2572
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002574 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002575
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002576 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2577 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002578
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 strcpy(di.name, hdev->name);
2580 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002581 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 di.flags = hdev->flags;
2583 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002584 if (lmp_bredr_capable(hdev)) {
2585 di.acl_mtu = hdev->acl_mtu;
2586 di.acl_pkts = hdev->acl_pkts;
2587 di.sco_mtu = hdev->sco_mtu;
2588 di.sco_pkts = hdev->sco_pkts;
2589 } else {
2590 di.acl_mtu = hdev->le_mtu;
2591 di.acl_pkts = hdev->le_pkts;
2592 di.sco_mtu = 0;
2593 di.sco_pkts = 0;
2594 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 di.link_policy = hdev->link_policy;
2596 di.link_mode = hdev->link_mode;
2597
2598 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2599 memcpy(&di.features, &hdev->features, sizeof(di.features));
2600
2601 if (copy_to_user(arg, &di, sizeof(di)))
2602 err = -EFAULT;
2603
2604 hci_dev_put(hdev);
2605
2606 return err;
2607}
2608
2609/* ---- Interface to HCI drivers ---- */
2610
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002611static int hci_rfkill_set_block(void *data, bool blocked)
2612{
2613 struct hci_dev *hdev = data;
2614
2615 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2616
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002617 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2618 return -EBUSY;
2619
Johan Hedberg5e130362013-09-13 08:58:17 +03002620 if (blocked) {
2621 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002622 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2623 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002624 } else {
2625 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002626 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002627
2628 return 0;
2629}
2630
2631static const struct rfkill_ops hci_rfkill_ops = {
2632 .set_block = hci_rfkill_set_block,
2633};
2634
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002635static void hci_power_on(struct work_struct *work)
2636{
2637 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002638 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002639
2640 BT_DBG("%s", hdev->name);
2641
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002642 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002643 if (err < 0) {
2644 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002645 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002646 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002647
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002648 /* During the HCI setup phase, a few error conditions are
2649 * ignored and they need to be checked now. If they are still
2650 * valid, it is important to turn the device back off.
2651 */
2652 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2653 (hdev->dev_type == HCI_BREDR &&
2654 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2655 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002656 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2657 hci_dev_do_close(hdev);
2658 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002659 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2660 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002661 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002662
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002663 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002664 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002665}
2666
2667static void hci_power_off(struct work_struct *work)
2668{
Johan Hedberg32435532011-11-07 22:16:04 +02002669 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002670 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002671
2672 BT_DBG("%s", hdev->name);
2673
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002674 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002675}
2676
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002677static void hci_discov_off(struct work_struct *work)
2678{
2679 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002680
2681 hdev = container_of(work, struct hci_dev, discov_off.work);
2682
2683 BT_DBG("%s", hdev->name);
2684
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002685 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002686}
2687
Johan Hedberg35f74982014-02-18 17:14:32 +02002688void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002689{
Johan Hedberg48210022013-01-27 00:31:28 +02002690 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002691
Johan Hedberg48210022013-01-27 00:31:28 +02002692 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2693 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002694 kfree(uuid);
2695 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002696}
2697
Johan Hedberg35f74982014-02-18 17:14:32 +02002698void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002699{
2700 struct list_head *p, *n;
2701
2702 list_for_each_safe(p, n, &hdev->link_keys) {
2703 struct link_key *key;
2704
2705 key = list_entry(p, struct link_key, list);
2706
2707 list_del(p);
2708 kfree(key);
2709 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002710}
2711
Johan Hedberg35f74982014-02-18 17:14:32 +02002712void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002713{
2714 struct smp_ltk *k, *tmp;
2715
2716 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2717 list_del(&k->list);
2718 kfree(k);
2719 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002720}
2721
Johan Hedberg970c4e42014-02-18 10:19:33 +02002722void hci_smp_irks_clear(struct hci_dev *hdev)
2723{
2724 struct smp_irk *k, *tmp;
2725
2726 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2727 list_del(&k->list);
2728 kfree(k);
2729 }
2730}
2731
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002732struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2733{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002734 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002735
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002736 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002737 if (bacmp(bdaddr, &k->bdaddr) == 0)
2738 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002739
2740 return NULL;
2741}
2742
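/* Decide whether a link key should be stored persistently, based on
 * the key type and the bonding requirements of both sides.
 */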
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302743static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002744 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002745{
2746 /* Legacy key */
2747 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302748 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002749
2750 /* Debug keys are insecure so don't store them persistently */
2751 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302752 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002753
2754 /* Changed combination key and there's no previous one */
2755 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302756 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002757
2758 /* Security mode 3 case */
2759 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302760 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002761
2762 /* Neither local nor remote side had no-bonding as a requirement */
2763 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302764 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002765
2766 /* Local side had dedicated bonding as requirement */
2767 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302768 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002769
2770 /* Remote side had dedicated bonding as requirement */
2771 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302772 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002773
2774 /* If none of the above criteria match, then don't store the key
2775 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302776 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002777}
2778
Johan Hedberg98a0b842014-01-30 19:40:00 -08002779static bool ltk_type_master(u8 type)
2780{
2781 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2782 return true;
2783
2784 return false;
2785}
2786
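/* Look up an LTK by its EDIV and Rand values, matching the requested
 * master/slave role.
 */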
2787struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2788 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002789{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002790 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002791
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002792 list_for_each_entry(k, &hdev->long_term_keys, list) {
2793 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002794 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002795 continue;
2796
Johan Hedberg98a0b842014-01-30 19:40:00 -08002797 if (ltk_type_master(k->type) != master)
2798 continue;
2799
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002800 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002801 }
2802
2803 return NULL;
2804}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002805
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002806struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002807 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002808{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002809 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002810
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002811 list_for_each_entry(k, &hdev->long_term_keys, list)
2812 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002813 bacmp(bdaddr, &k->bdaddr) == 0 &&
2814 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002815 return k;
2816
2817 return NULL;
2818}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002819
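/* Resolve a Resolvable Private Address to an IRK: previously resolved
 * RPAs match directly, otherwise each stored IRK is tried and the RPA
 * is cached on a match.
 */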
Johan Hedberg970c4e42014-02-18 10:19:33 +02002820struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2821{
2822 struct smp_irk *irk;
2823
2824 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2825 if (!bacmp(&irk->rpa, rpa))
2826 return irk;
2827 }
2828
2829 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2830 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2831 bacpy(&irk->rpa, rpa);
2832 return irk;
2833 }
2834 }
2835
2836 return NULL;
2837}
2838
2839struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2840 u8 addr_type)
2841{
2842 struct smp_irk *irk;
2843
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002844 /* Identity Address must be public or static random */
2845 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2846 return NULL;
2847
Johan Hedberg970c4e42014-02-18 10:19:33 +02002848 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2849 if (addr_type == irk->addr_type &&
2850 bacmp(bdaddr, &irk->bdaddr) == 0)
2851 return irk;
2852 }
2853
2854 return NULL;
2855}
2856
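/* Store a BR/EDR link key, reusing any existing entry for the same
 * address. For new keys, mgmt is notified and told whether the key
 * should be stored persistently.
 */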
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002857int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002858 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002859{
2860 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302861 u8 old_key_type;
2862 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002863
2864 old_key = hci_find_link_key(hdev, bdaddr);
2865 if (old_key) {
2866 old_key_type = old_key->type;
2867 key = old_key;
2868 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002869 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002870 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002871 if (!key)
2872 return -ENOMEM;
2873 list_add(&key->list, &hdev->link_keys);
2874 }
2875
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002876 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002877
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002878 /* Some buggy controller combinations generate a changed
2879 * combination key for legacy pairing even when there's no
 2880	 * previous key
	 */
2881 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002882 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002883 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002884 if (conn)
2885 conn->key_type = type;
2886 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002887
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002888 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002889 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002890 key->pin_len = pin_len;
2891
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002892 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002893 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002894 else
2895 key->type = type;
2896
Johan Hedberg4df378a2011-04-28 11:29:03 -07002897 if (!new_key)
2898 return 0;
2899
2900 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2901
Johan Hedberg744cf192011-11-08 20:40:14 +02002902 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002903
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302904 if (conn)
2905 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002906
2907 return 0;
2908}
2909
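/* Add or update a Long Term Key. The lookup is done per address and
 * key role, so pairing as master and as slave with the same peer
 * results in two independent entries. Returns the stored entry, or
 * NULL on allocation failure.
 */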
Johan Hedbergca9142b2014-02-19 14:57:44 +02002910struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002911 u8 addr_type, u8 type, u8 authenticated,
2912 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002913{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002914 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002915 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002916
Johan Hedberg98a0b842014-01-30 19:40:00 -08002917 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002918 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002919 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002920 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002921 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002922 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002923 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002924 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002925 }
2926
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002927 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002928 key->bdaddr_type = addr_type;
2929 memcpy(key->val, tk, sizeof(key->val));
2930 key->authenticated = authenticated;
2931 key->ediv = ediv;
2932 key->enc_size = enc_size;
2933 key->type = type;
2934 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002935
Johan Hedbergca9142b2014-02-19 14:57:44 +02002936 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002937}
2938
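/* Add or update an Identity Resolving Key for the given identity
 * address. The key value and last known RPA are refreshed even when
 * an entry already exists. Returns the stored entry, or NULL on
 * allocation failure.
 */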
Johan Hedbergca9142b2014-02-19 14:57:44 +02002939struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2940 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002941{
2942 struct smp_irk *irk;
2943
2944 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2945 if (!irk) {
2946 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2947 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002948 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002949
2950 bacpy(&irk->bdaddr, bdaddr);
2951 irk->addr_type = addr_type;
2952
2953 list_add(&irk->list, &hdev->identity_resolving_keys);
2954 }
2955
2956 memcpy(irk->val, val, 16);
2957 bacpy(&irk->rpa, rpa);
2958
Johan Hedbergca9142b2014-02-19 14:57:44 +02002959 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002960}
2961
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002962int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2963{
2964 struct link_key *key;
2965
2966 key = hci_find_link_key(hdev, bdaddr);
2967 if (!key)
2968 return -ENOENT;
2969
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002970 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002971
2972 list_del(&key->list);
2973 kfree(key);
2974
2975 return 0;
2976}
2977
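/* Remove all LTKs stored for the given identity address. Returns
 * -ENOENT when no matching key was found.
 */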
Johan Hedberge0b2b272014-02-18 17:14:31 +02002978int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002979{
2980 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002981 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002982
2983 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002984 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002985 continue;
2986
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002987 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002988
2989 list_del(&k->list);
2990 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002991 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002992 }
2993
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002994 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002995}
2996
Johan Hedberga7ec7332014-02-18 17:14:35 +02002997void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2998{
2999 struct smp_irk *k, *tmp;
3000
Johan Hedberg668b7b12014-02-21 16:03:31 +02003001 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003002 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3003 continue;
3004
3005 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3006
3007 list_del(&k->list);
3008 kfree(k);
3009 }
3010}
3011
Ville Tervo6bd32322011-02-16 16:32:41 +02003012/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003013static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02003014{
3015 struct hci_dev *hdev = (void *) arg;
3016
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003017 if (hdev->sent_cmd) {
3018 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3019 u16 opcode = __le16_to_cpu(sent->opcode);
3020
3021 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3022 } else {
3023 BT_ERR("%s command tx timeout", hdev->name);
3024 }
3025
Ville Tervo6bd32322011-02-16 16:32:41 +02003026 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003027 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003028}
3029
Szymon Janc2763eda2011-03-22 13:12:22 +01003030struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003031 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003032{
3033 struct oob_data *data;
3034
3035 list_for_each_entry(data, &hdev->remote_oob_data, list)
3036 if (bacmp(bdaddr, &data->bdaddr) == 0)
3037 return data;
3038
3039 return NULL;
3040}
3041
3042int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3043{
3044 struct oob_data *data;
3045
3046 data = hci_find_remote_oob_data(hdev, bdaddr);
3047 if (!data)
3048 return -ENOENT;
3049
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003050 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003051
3052 list_del(&data->list);
3053 kfree(data);
3054
3055 return 0;
3056}
3057
Johan Hedberg35f74982014-02-18 17:14:32 +02003058void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003059{
3060 struct oob_data *data, *n;
3061
3062 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3063 list_del(&data->list);
3064 kfree(data);
3065 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003066}
3067
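/* Store remote OOB data for legacy (P-192 only) pairing. The P-256
 * values are cleared so stale Secure Connections data can never be
 * used; hci_add_remote_oob_ext_data() below stores both variants.
 */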
Marcel Holtmann07988722014-01-10 02:07:29 -08003068int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003070{
3071 struct oob_data *data;
3072
3073 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003074 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003075 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003076 if (!data)
3077 return -ENOMEM;
3078
3079 bacpy(&data->bdaddr, bdaddr);
3080 list_add(&data->list, &hdev->remote_oob_data);
3081 }
3082
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003083 memcpy(data->hash192, hash, sizeof(data->hash192));
3084 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003085
Marcel Holtmann07988722014-01-10 02:07:29 -08003086 memset(data->hash256, 0, sizeof(data->hash256));
3087 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3088
3089 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3090
3091 return 0;
3092}
3093
3094int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3095 u8 *hash192, u8 *randomizer192,
3096 u8 *hash256, u8 *randomizer256)
3097{
3098 struct oob_data *data;
3099
3100 data = hci_find_remote_oob_data(hdev, bdaddr);
3101 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003102 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003103 if (!data)
3104 return -ENOMEM;
3105
3106 bacpy(&data->bdaddr, bdaddr);
3107 list_add(&data->list, &hdev->remote_oob_data);
3108 }
3109
3110 memcpy(data->hash192, hash192, sizeof(data->hash192));
3111 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3112
3113 memcpy(data->hash256, hash256, sizeof(data->hash256));
3114 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3115
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003116 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003117
3118 return 0;
3119}
3120
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003121struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3122 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003123{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003124 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003125
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003126 list_for_each_entry(b, &hdev->blacklist, list) {
3127 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003128 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003129 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003130
3131 return NULL;
3132}
3133
Johan Hedberg35f74982014-02-18 17:14:32 +02003134void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003135{
3136 struct list_head *p, *n;
3137
3138 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003139 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003140
3141 list_del(p);
3142 kfree(b);
3143 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003144}
3145
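/* Add a device to the blacklist. BDADDR_ANY is rejected and duplicate
 * entries fail with -EEXIST; on success user space is notified via
 * mgmt_device_blocked().
 */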
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003146int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003147{
3148 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003149
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003150 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003151 return -EBADF;
3152
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003153 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003154 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003155
3156 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003157 if (!entry)
3158 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003159
3160 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003161 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003162
3163 list_add(&entry->list, &hdev->blacklist);
3164
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003165 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003166}
3167
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003168int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003169{
3170 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003171
Johan Hedberg35f74982014-02-18 17:14:32 +02003172 if (!bacmp(bdaddr, BDADDR_ANY)) {
3173 hci_blacklist_clear(hdev);
3174 return 0;
3175 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003176
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003177 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003178 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003179 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003180
3181 list_del(&entry->list);
3182 kfree(entry);
3183
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003184 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003185}
3186
Andre Guedes15819a72014-02-03 13:56:18 -03003187/* This function requires the caller holds hdev->lock */
3188struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3189 bdaddr_t *addr, u8 addr_type)
3190{
3191 struct hci_conn_params *params;
3192
3193 list_for_each_entry(params, &hdev->le_conn_params, list) {
3194 if (bacmp(&params->addr, addr) == 0 &&
3195 params->addr_type == addr_type) {
3196 return params;
3197 }
3198 }
3199
3200 return NULL;
3201}
3202
Andre Guedescef952c2014-02-26 20:21:49 -03003203static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3204{
3205 struct hci_conn *conn;
3206
3207 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3208 if (!conn)
3209 return false;
3210
3211 if (conn->dst_type != type)
3212 return false;
3213
3214 if (conn->state != BT_CONNECTED)
3215 return false;
3216
3217 return true;
3218}
3219
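/* Add or update connection parameters for an LE device. Depending on
 * auto_connect the address is also added to or removed from the
 * pending LE connection list: HCI_AUTO_CONN_ALWAYS queues a connection
 * attempt unless a link is already established.
 */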
Andre Guedes15819a72014-02-03 13:56:18 -03003220/* This function requires the caller holds hdev->lock */
3221void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003222 u8 auto_connect, u16 conn_min_interval,
3223 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003224{
3225 struct hci_conn_params *params;
3226
3227 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003228 if (params)
3229 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003230
3231 params = kzalloc(sizeof(*params), GFP_KERNEL);
3232 if (!params) {
3233 BT_ERR("Out of memory");
3234 return;
3235 }
3236
3237 bacpy(&params->addr, addr);
3238 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003239
3240 list_add(&params->list, &hdev->le_conn_params);
3241
3242update:
Andre Guedes15819a72014-02-03 13:56:18 -03003243 params->conn_min_interval = conn_min_interval;
3244 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003245 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003246
Andre Guedescef952c2014-02-26 20:21:49 -03003247 switch (auto_connect) {
3248 case HCI_AUTO_CONN_DISABLED:
3249 case HCI_AUTO_CONN_LINK_LOSS:
3250 hci_pend_le_conn_del(hdev, addr, addr_type);
3251 break;
3252 case HCI_AUTO_CONN_ALWAYS:
3253 if (!is_connected(hdev, addr, addr_type))
3254 hci_pend_le_conn_add(hdev, addr, addr_type);
3255 break;
3256 }
Andre Guedes15819a72014-02-03 13:56:18 -03003257
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003258 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3259 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3260 conn_min_interval, conn_max_interval);
Andre Guedes15819a72014-02-03 13:56:18 -03003261}
3262
3263/* This function requires the caller holds hdev->lock */
3264void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3265{
3266 struct hci_conn_params *params;
3267
3268 params = hci_conn_params_lookup(hdev, addr, addr_type);
3269 if (!params)
3270 return;
3271
Andre Guedescef952c2014-02-26 20:21:49 -03003272 hci_pend_le_conn_del(hdev, addr, addr_type);
3273
Andre Guedes15819a72014-02-03 13:56:18 -03003274 list_del(&params->list);
3275 kfree(params);
3276
3277 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3278}
3279
3280/* This function requires the caller holds hdev->lock */
3281void hci_conn_params_clear(struct hci_dev *hdev)
3282{
3283 struct hci_conn_params *params, *tmp;
3284
3285 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3286 list_del(&params->list);
3287 kfree(params);
3288 }
3289
3290 BT_DBG("All LE connection parameters were removed");
3291}
3292
Andre Guedes77a77a32014-02-26 20:21:46 -03003293/* This function requires the caller holds hdev->lock */
3294struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3295 bdaddr_t *addr, u8 addr_type)
3296{
3297 struct bdaddr_list *entry;
3298
3299 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3300 if (bacmp(&entry->bdaddr, addr) == 0 &&
3301 entry->bdaddr_type == addr_type)
3302 return entry;
3303 }
3304
3305 return NULL;
3306}
3307
3308/* This function requires the caller holds hdev->lock */
3309void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3310{
3311 struct bdaddr_list *entry;
3312
3313 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3314 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003315 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003316
3317 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3318 if (!entry) {
3319 BT_ERR("Out of memory");
3320 return;
3321 }
3322
3323 bacpy(&entry->bdaddr, addr);
3324 entry->bdaddr_type = addr_type;
3325
3326 list_add(&entry->list, &hdev->pend_le_conns);
3327
3328 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003329
3330done:
3331 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003332}
3333
3334/* This function requires the caller holds hdev->lock */
3335void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3336{
3337 struct bdaddr_list *entry;
3338
3339 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3340 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003341 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003342
3343 list_del(&entry->list);
3344 kfree(entry);
3345
3346 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003347
3348done:
3349 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003350}
3351
3352/* This function requires the caller holds hdev->lock */
3353void hci_pend_le_conns_clear(struct hci_dev *hdev)
3354{
3355 struct bdaddr_list *entry, *tmp;
3356
3357 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3358 list_del(&entry->list);
3359 kfree(entry);
3360 }
3361
3362 BT_DBG("All LE pending connections cleared");
3363}
3364
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003365static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003366{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003367 if (status) {
3368 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003369
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003370 hci_dev_lock(hdev);
3371 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3372 hci_dev_unlock(hdev);
3373 return;
3374 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003375}
3376
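/* Runs when the LE scan disable command completes. For a pure LE
 * discovery the session simply ends; for interleaved discovery the
 * BR/EDR inquiry phase is started next, using the general inquiry
 * access code (GIAC).
 */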
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003377static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003378{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003379 /* General inquiry access code (GIAC) */
3380 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3381 struct hci_request req;
3382 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003383 int err;
3384
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003385 if (status) {
3386 BT_ERR("Failed to disable LE scanning: status %d", status);
3387 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003388 }
3389
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003390 switch (hdev->discovery.type) {
3391 case DISCOV_TYPE_LE:
3392 hci_dev_lock(hdev);
3393 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3394 hci_dev_unlock(hdev);
3395 break;
3396
3397 case DISCOV_TYPE_INTERLEAVED:
3398 hci_req_init(&req, hdev);
3399
3400 memset(&cp, 0, sizeof(cp));
3401 memcpy(&cp.lap, lap, sizeof(cp.lap));
3402 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3403 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3404
3405 hci_dev_lock(hdev);
3406
3407 hci_inquiry_cache_flush(hdev);
3408
3409 err = hci_req_run(&req, inquiry_complete);
3410 if (err) {
3411 BT_ERR("Inquiry request failed: err %d", err);
3412 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3413 }
3414
3415 hci_dev_unlock(hdev);
3416 break;
3417 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003418}
3419
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003420static void le_scan_disable_work(struct work_struct *work)
3421{
3422 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003423 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003424 struct hci_request req;
3425 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003426
3427 BT_DBG("%s", hdev->name);
3428
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003429 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003430
Andre Guedesb1efcc22014-02-26 20:21:40 -03003431 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003432
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003433 err = hci_req_run(&req, le_scan_disable_work_complete);
3434 if (err)
3435 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003436}
3437
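/* Select the own address type for an outgoing request and, when
 * needed, queue the command that programs the random address. In
 * order of preference: a resolvable private address when HCI_PRIVACY
 * is set, a non-resolvable private address when require_privacy is
 * set, the static random address when it is forced or no public
 * address exists, and finally the public address.
 */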
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003438int hci_update_random_address(struct hci_request *req, bool require_privacy,
3439 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003440{
3441 struct hci_dev *hdev = req->hdev;
3442 int err;
3443
 3444	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003445	 * the current RPA has expired or there is something other than
3446 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003447 */
3448 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003449 int to;
3450
3451 *own_addr_type = ADDR_LE_DEV_RANDOM;
3452
3453 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003454 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003455 return 0;
3456
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003457 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003458 if (err < 0) {
3459 BT_ERR("%s failed to generate new RPA", hdev->name);
3460 return err;
3461 }
3462
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003463 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003464
3465 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3466 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3467
3468 return 0;
3469 }
3470
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003471	/* When privacy is required without a resolvable private address,
 3472	 * use a non-resolvable private address. This is useful for active
3473 * scanning and non-connectable advertising.
3474 */
3475 if (require_privacy) {
3476 bdaddr_t urpa;
3477
3478 get_random_bytes(&urpa, 6);
3479 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3480
3481 *own_addr_type = ADDR_LE_DEV_RANDOM;
3482 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3483 return 0;
3484 }
3485
Johan Hedbergebd3a742014-02-23 19:42:21 +02003486 /* If forcing static address is in use or there is no public
 3487	 * address, use the static address as the random address (but skip
 3488	 * the HCI command if the current random address is already the
 3489	 * static one).
3490 */
3491 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3492 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3493 *own_addr_type = ADDR_LE_DEV_RANDOM;
3494 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3495 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3496 &hdev->static_addr);
3497 return 0;
3498 }
3499
3500 /* Neither privacy nor static address is being used so use a
3501 * public address.
3502 */
3503 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3504
3505 return 0;
3506}
3507
David Herrmann9be0dab2012-04-22 14:39:57 +02003508/* Alloc HCI device */
3509struct hci_dev *hci_alloc_dev(void)
3510{
3511 struct hci_dev *hdev;
3512
3513 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3514 if (!hdev)
3515 return NULL;
3516
David Herrmannb1b813d2012-04-22 14:39:58 +02003517 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3518 hdev->esco_type = (ESCO_HV1);
3519 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003520 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3521 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003522 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3523 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003524
David Herrmannb1b813d2012-04-22 14:39:58 +02003525 hdev->sniff_max_interval = 800;
3526 hdev->sniff_min_interval = 80;
3527
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003528 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003529 hdev->le_scan_interval = 0x0060;
3530 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003531 hdev->le_conn_min_interval = 0x0028;
3532 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003533
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003534 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3535
David Herrmannb1b813d2012-04-22 14:39:58 +02003536 mutex_init(&hdev->lock);
3537 mutex_init(&hdev->req_lock);
3538
3539 INIT_LIST_HEAD(&hdev->mgmt_pending);
3540 INIT_LIST_HEAD(&hdev->blacklist);
3541 INIT_LIST_HEAD(&hdev->uuids);
3542 INIT_LIST_HEAD(&hdev->link_keys);
3543 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003544 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003545 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003546 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003547 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003548 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003549
3550 INIT_WORK(&hdev->rx_work, hci_rx_work);
3551 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3552 INIT_WORK(&hdev->tx_work, hci_tx_work);
3553 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003554
David Herrmannb1b813d2012-04-22 14:39:58 +02003555 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3556 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3557 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3558
David Herrmannb1b813d2012-04-22 14:39:58 +02003559 skb_queue_head_init(&hdev->rx_q);
3560 skb_queue_head_init(&hdev->cmd_q);
3561 skb_queue_head_init(&hdev->raw_q);
3562
3563 init_waitqueue_head(&hdev->req_wait_q);
3564
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003565 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003566
David Herrmannb1b813d2012-04-22 14:39:58 +02003567 hci_init_sysfs(hdev);
3568 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003569
3570 return hdev;
3571}
3572EXPORT_SYMBOL(hci_alloc_dev);
3573
3574/* Free HCI device */
3575void hci_free_dev(struct hci_dev *hdev)
3576{
David Herrmann9be0dab2012-04-22 14:39:57 +02003577 /* will free via device release */
3578 put_device(&hdev->dev);
3579}
3580EXPORT_SYMBOL(hci_free_dev);
3581
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582/* Register HCI device */
3583int hci_register_dev(struct hci_dev *hdev)
3584{
David Herrmannb1b813d2012-04-22 14:39:58 +02003585 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586
David Herrmann010666a2012-01-07 15:47:07 +01003587 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588 return -EINVAL;
3589
Mat Martineau08add512011-11-02 16:18:36 -07003590 /* Do not allow HCI_AMP devices to register at index 0,
3591 * so the index can be used as the AMP controller ID.
3592 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003593 switch (hdev->dev_type) {
3594 case HCI_BREDR:
3595 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3596 break;
3597 case HCI_AMP:
3598 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3599 break;
3600 default:
3601 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003603
Sasha Levin3df92b32012-05-27 22:36:56 +02003604 if (id < 0)
3605 return id;
3606
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 sprintf(hdev->name, "hci%d", id);
3608 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003609
3610 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3611
Kees Cookd8537542013-07-03 15:04:57 -07003612 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3613 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003614 if (!hdev->workqueue) {
3615 error = -ENOMEM;
3616 goto err;
3617 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003618
Kees Cookd8537542013-07-03 15:04:57 -07003619 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3620 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003621 if (!hdev->req_workqueue) {
3622 destroy_workqueue(hdev->workqueue);
3623 error = -ENOMEM;
3624 goto err;
3625 }
3626
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003627 if (!IS_ERR_OR_NULL(bt_debugfs))
3628 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3629
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003630 dev_set_name(&hdev->dev, "%s", hdev->name);
3631
Johan Hedberg99780a72014-02-18 10:40:07 +02003632 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3633 CRYPTO_ALG_ASYNC);
3634 if (IS_ERR(hdev->tfm_aes)) {
3635 BT_ERR("Unable to create crypto context");
3636 error = PTR_ERR(hdev->tfm_aes);
3637 hdev->tfm_aes = NULL;
3638 goto err_wqueue;
3639 }
3640
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003641 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003642 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003643 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003645 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003646 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3647 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003648 if (hdev->rfkill) {
3649 if (rfkill_register(hdev->rfkill) < 0) {
3650 rfkill_destroy(hdev->rfkill);
3651 hdev->rfkill = NULL;
3652 }
3653 }
3654
Johan Hedberg5e130362013-09-13 08:58:17 +03003655 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3656 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3657
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003658 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003659 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003660
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003661 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003662 /* Assume BR/EDR support until proven otherwise (such as
3663 * through reading supported features during init.
3664 */
3665 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3666 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003667
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003668 write_lock(&hci_dev_list_lock);
3669 list_add(&hdev->list, &hci_dev_list);
3670 write_unlock(&hci_dev_list_lock);
3671
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003673 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674
Johan Hedberg19202572013-01-14 22:33:51 +02003675 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003676
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003678
Johan Hedberg99780a72014-02-18 10:40:07 +02003679err_tfm:
3680 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003681err_wqueue:
3682 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003683 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003684err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003685 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003686
David Herrmann33ca9542011-10-08 14:58:49 +02003687 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688}
3689EXPORT_SYMBOL(hci_register_dev);
3690
3691/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003692void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693{
Sasha Levin3df92b32012-05-27 22:36:56 +02003694 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003695
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003696 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697
Johan Hovold94324962012-03-15 14:48:41 +01003698 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3699
Sasha Levin3df92b32012-05-27 22:36:56 +02003700 id = hdev->id;
3701
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003702 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003704 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
3706 hci_dev_do_close(hdev);
3707
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303708 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003709 kfree_skb(hdev->reassembly[i]);
3710
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003711 cancel_work_sync(&hdev->power_on);
3712
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003713 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003714 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003715 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003716 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003717 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003718 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003719
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003720 /* mgmt_index_removed should take care of emptying the
 3721	 * pending list
	 */
3722 BUG_ON(!list_empty(&hdev->mgmt_pending));
3723
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 hci_notify(hdev, HCI_DEV_UNREG);
3725
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003726 if (hdev->rfkill) {
3727 rfkill_unregister(hdev->rfkill);
3728 rfkill_destroy(hdev->rfkill);
3729 }
3730
Johan Hedberg99780a72014-02-18 10:40:07 +02003731 if (hdev->tfm_aes)
3732 crypto_free_blkcipher(hdev->tfm_aes);
3733
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003734 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003735
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003736 debugfs_remove_recursive(hdev->debugfs);
3737
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003738 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003739 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003740
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003741 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003742 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003743 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003744 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003745 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003746 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003747 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003748 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003749 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003750 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003751
David Herrmanndc946bd2012-01-07 15:47:24 +01003752 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003753
3754 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755}
3756EXPORT_SYMBOL(hci_unregister_dev);
3757
3758/* Suspend HCI device */
3759int hci_suspend_dev(struct hci_dev *hdev)
3760{
3761 hci_notify(hdev, HCI_DEV_SUSPEND);
3762 return 0;
3763}
3764EXPORT_SYMBOL(hci_suspend_dev);
3765
3766/* Resume HCI device */
3767int hci_resume_dev(struct hci_dev *hdev)
3768{
3769 hci_notify(hdev, HCI_DEV_RESUME);
3770 return 0;
3771}
3772EXPORT_SYMBOL(hci_resume_dev);
3773
Marcel Holtmann76bca882009-11-18 00:40:39 +01003774/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003775int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003776{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003777 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003778 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003779 kfree_skb(skb);
3780 return -ENXIO;
3781 }
3782
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003783 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003784 bt_cb(skb)->incoming = 1;
3785
3786 /* Time stamp */
3787 __net_timestamp(skb);
3788
Marcel Holtmann76bca882009-11-18 00:40:39 +01003789 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003790 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003791
Marcel Holtmann76bca882009-11-18 00:40:39 +01003792 return 0;
3793}
3794EXPORT_SYMBOL(hci_recv_frame);
3795
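/* Reassemble a partially received HCI packet. Bytes accumulate in
 * hdev->reassembly[index] until the length advertised in the packet
 * header has arrived; a completed frame is handed to hci_recv_frame()
 * and the number of unconsumed input bytes is returned.
 */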
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303796static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003797 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303798{
3799 int len = 0;
3800 int hlen = 0;
3801 int remain = count;
3802 struct sk_buff *skb;
3803 struct bt_skb_cb *scb;
3804
3805 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003806 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303807 return -EILSEQ;
3808
3809 skb = hdev->reassembly[index];
3810
3811 if (!skb) {
3812 switch (type) {
3813 case HCI_ACLDATA_PKT:
3814 len = HCI_MAX_FRAME_SIZE;
3815 hlen = HCI_ACL_HDR_SIZE;
3816 break;
3817 case HCI_EVENT_PKT:
3818 len = HCI_MAX_EVENT_SIZE;
3819 hlen = HCI_EVENT_HDR_SIZE;
3820 break;
3821 case HCI_SCODATA_PKT:
3822 len = HCI_MAX_SCO_SIZE;
3823 hlen = HCI_SCO_HDR_SIZE;
3824 break;
3825 }
3826
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003827 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303828 if (!skb)
3829 return -ENOMEM;
3830
3831 scb = (void *) skb->cb;
3832 scb->expect = hlen;
3833 scb->pkt_type = type;
3834
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303835 hdev->reassembly[index] = skb;
3836 }
3837
3838 while (count) {
3839 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003840 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303841
3842 memcpy(skb_put(skb, len), data, len);
3843
3844 count -= len;
3845 data += len;
3846 scb->expect -= len;
3847 remain = count;
3848
3849 switch (type) {
3850 case HCI_EVENT_PKT:
3851 if (skb->len == HCI_EVENT_HDR_SIZE) {
3852 struct hci_event_hdr *h = hci_event_hdr(skb);
3853 scb->expect = h->plen;
3854
3855 if (skb_tailroom(skb) < scb->expect) {
3856 kfree_skb(skb);
3857 hdev->reassembly[index] = NULL;
3858 return -ENOMEM;
3859 }
3860 }
3861 break;
3862
3863 case HCI_ACLDATA_PKT:
3864 if (skb->len == HCI_ACL_HDR_SIZE) {
3865 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3866 scb->expect = __le16_to_cpu(h->dlen);
3867
3868 if (skb_tailroom(skb) < scb->expect) {
3869 kfree_skb(skb);
3870 hdev->reassembly[index] = NULL;
3871 return -ENOMEM;
3872 }
3873 }
3874 break;
3875
3876 case HCI_SCODATA_PKT:
3877 if (skb->len == HCI_SCO_HDR_SIZE) {
3878 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3879 scb->expect = h->dlen;
3880
3881 if (skb_tailroom(skb) < scb->expect) {
3882 kfree_skb(skb);
3883 hdev->reassembly[index] = NULL;
3884 return -ENOMEM;
3885 }
3886 }
3887 break;
3888 }
3889
3890 if (scb->expect == 0) {
3891 /* Complete frame */
3892
3893 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003894 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303895
3896 hdev->reassembly[index] = NULL;
3897 return remain;
3898 }
3899 }
3900
3901 return remain;
3902}
3903
Marcel Holtmannef222012007-07-11 06:42:04 +02003904int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3905{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303906 int rem = 0;
3907
Marcel Holtmannef222012007-07-11 06:42:04 +02003908 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3909 return -EILSEQ;
3910
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003911 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003912 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303913 if (rem < 0)
3914 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003915
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303916 data += (count - rem);
3917 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003918 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003919
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303920 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003921}
3922EXPORT_SYMBOL(hci_recv_fragment);
3923
Suraj Sumangala99811512010-07-14 13:02:19 +05303924#define STREAM_REASSEMBLY 0
3925
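/* Reassembly for byte-stream transports (e.g. UART), where the packet
 * type indicator arrives in-band as the first byte of each frame
 * instead of being passed separately by the driver.
 */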
3926int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3927{
3928 int type;
3929 int rem = 0;
3930
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003931 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303932 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3933
3934 if (!skb) {
3935 struct { char type; } *pkt;
3936
3937 /* Start of the frame */
3938 pkt = data;
3939 type = pkt->type;
3940
3941 data++;
3942 count--;
3943 } else
3944 type = bt_cb(skb)->pkt_type;
3945
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003946 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003947 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303948 if (rem < 0)
3949 return rem;
3950
3951 data += (count - rem);
3952 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003953 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303954
3955 return rem;
3956}
3957EXPORT_SYMBOL(hci_recv_stream_fragment);
3958
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959/* ---- Interface to upper protocols ---- */
3960
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961int hci_register_cb(struct hci_cb *cb)
3962{
3963 BT_DBG("%p name %s", cb, cb->name);
3964
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003965 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003967 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968
3969 return 0;
3970}
3971EXPORT_SYMBOL(hci_register_cb);
3972
3973int hci_unregister_cb(struct hci_cb *cb)
3974{
3975 BT_DBG("%p name %s", cb, cb->name);
3976
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003977 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003979 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980
3981 return 0;
3982}
3983EXPORT_SYMBOL(hci_unregister_cb);
3984
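/* Hand one frame to the driver. A copy always goes to the monitor
 * socket and, in promiscuous mode, to the HCI sockets; the skb is
 * orphaned first so driver queueing is not charged to the sender.
 */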
Marcel Holtmann51086992013-10-10 14:54:19 -07003985static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003987 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003989 /* Time stamp */
3990 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003992 /* Send copy to monitor */
3993 hci_send_to_monitor(hdev, skb);
3994
3995 if (atomic_read(&hdev->promisc)) {
3996 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003997 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 }
3999
4000 /* Get rid of skb owner, prior to sending to the driver. */
4001 skb_orphan(skb);
4002
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004003 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004004 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005}
4006
Johan Hedberg3119ae92013-03-05 20:37:44 +02004007void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4008{
4009 skb_queue_head_init(&req->cmd_q);
4010 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004011 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004012}
4013
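/* Submit a batched request: the queued commands are spliced onto
 * hdev->cmd_q in one operation and the completion callback is attached
 * to the last skb, so it runs only after the final command completes.
 *
 * Typical usage (sketch; the callback name is illustrative):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete_cb);
 */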
4014int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4015{
4016 struct hci_dev *hdev = req->hdev;
4017 struct sk_buff *skb;
4018 unsigned long flags;
4019
4020 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4021
Andre Guedes5d73e032013-03-08 11:20:16 -03004022	/* If an error occurred during request building, remove all HCI
4023 * commands queued on the HCI request queue.
4024 */
4025 if (req->err) {
4026 skb_queue_purge(&req->cmd_q);
4027 return req->err;
4028 }
4029
Johan Hedberg3119ae92013-03-05 20:37:44 +02004030 /* Do not allow empty requests */
4031 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004032 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004033
4034 skb = skb_peek_tail(&req->cmd_q);
4035 bt_cb(skb)->req.complete = complete;
4036
4037 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4038 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4039 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4040
4041 queue_work(hdev->workqueue, &hdev->cmd_work);
4042
4043 return 0;
4044}
4045
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004046static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004047 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048{
4049 int len = HCI_COMMAND_HDR_SIZE + plen;
4050 struct hci_command_hdr *hdr;
4051 struct sk_buff *skb;
4052
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004054 if (!skb)
4055 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056
4057 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004058 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059 hdr->plen = plen;
4060
4061 if (plen)
4062 memcpy(skb_put(skb, plen), param, plen);
4063
4064 BT_DBG("skb len %d", skb->len);
4065
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004066 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004067
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004068 return skb;
4069}
4070
4071/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004072int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4073 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004074{
4075 struct sk_buff *skb;
4076
4077 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4078
4079 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4080 if (!skb) {
4081 BT_ERR("%s no memory for command", hdev->name);
4082 return -ENOMEM;
4083 }
4084
Johan Hedberg11714b32013-03-05 20:37:47 +02004085	/* Stand-alone HCI commands must be flagged as
4086 * single-command requests.
4087 */
4088 bt_cb(skb)->req.start = true;
4089
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004091 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092
4093 return 0;
4094}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095
Johan Hedberg71c76a12013-03-05 20:37:46 +02004096/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004097void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4098 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004099{
4100 struct hci_dev *hdev = req->hdev;
4101 struct sk_buff *skb;
4102
4103 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4104
Andre Guedes34739c12013-03-08 11:20:18 -03004105	/* If an error occurred during request building, there is no point in
4106 * queueing the HCI command. We can simply return.
4107 */
4108 if (req->err)
4109 return;
4110
Johan Hedberg71c76a12013-03-05 20:37:46 +02004111 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4112 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004113 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4114 hdev->name, opcode);
4115 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004116 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004117 }
4118
4119 if (skb_queue_empty(&req->cmd_q))
4120 bt_cb(skb)->req.start = true;
4121
Johan Hedberg02350a72013-04-03 21:50:29 +03004122 bt_cb(skb)->req.event = event;
4123
Johan Hedberg71c76a12013-03-05 20:37:46 +02004124 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004125}
4126
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004127void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4128 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004129{
4130 hci_req_add_ev(req, opcode, plen, param, 0);
4131}
4132
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004134void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135{
4136 struct hci_command_hdr *hdr;
4137
4138 if (!hdev->sent_cmd)
4139 return NULL;
4140
4141 hdr = (void *) hdev->sent_cmd->data;
4142
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004143 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144 return NULL;
4145
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004146 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147
4148 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4149}
4150
4151/* Send ACL data */
4152static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4153{
4154 struct hci_acl_hdr *hdr;
4155 int len = skb->len;
4156
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004157 skb_push(skb, HCI_ACL_HDR_SIZE);
4158 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004159 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004160 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4161 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162}
4163
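/* Queue an ACL frame, splitting a fragmented skb into individually
 * headered packets: the head keeps the caller's flags, every following
 * fragment is marked ACL_CONT, and the whole list is queued atomically
 * under the queue lock.
 */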
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004164static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004165 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004167 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 struct hci_dev *hdev = conn->hdev;
4169 struct sk_buff *list;
4170
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004171 skb->len = skb_headlen(skb);
4172 skb->data_len = 0;
4173
4174 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004175
4176 switch (hdev->dev_type) {
4177 case HCI_BREDR:
4178 hci_add_acl_hdr(skb, conn->handle, flags);
4179 break;
4180 case HCI_AMP:
4181 hci_add_acl_hdr(skb, chan->handle, flags);
4182 break;
4183 default:
4184 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4185 return;
4186 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004187
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004188 list = skb_shinfo(skb)->frag_list;
4189 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190		/* Non-fragmented */
4191 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4192
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004193 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 } else {
4195 /* Fragmented */
4196 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4197
4198 skb_shinfo(skb)->frag_list = NULL;
4199
4200 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004201 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004203 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004204
4205 flags &= ~ACL_START;
4206 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207 do {
 4208			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004209
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004210 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004211 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212
4213 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4214
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004215 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216 } while (list);
4217
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004218 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004220}
4221
4222void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4223{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004224 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004225
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004226 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004227
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004228 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004230 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
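
/* Illustrative sketch, not part of the original file: a minimal SCO
 * sender in the style of the SCO socket layer. The helper name is
 * hypothetical, and a real caller (sco_send_frame() in sco.c) also
 * checks the payload against the negotiated SCO MTU first.
 */
static int example_send_sco(struct hci_conn *conn, const void *data,
			    size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);

	/* hci_send_sco() pushes the 3 byte SCO header and queues the
	 * frame; transmission happens later from hci_tx_work().
	 */
	hci_send_sco(conn, skb);

	return 0;
}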

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else {
		*quote = 0;
	}

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
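
/* Worked example of the fair-share quota above (numbers are made up
 * for illustration): with hdev->acl_cnt == 8 free controller buffers
 * and num == 3 ready ACL connections, the least-used connection gets
 * quote = 8 / 3 = 2 packets this round. When cnt / num rounds down
 * to 0, the quote is forced to 1 so a ready link is never starved.
 */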

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
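
/* Worked example of the priority scan above (illustrative numbers):
 * two ACL channels are ready, one whose head packet has priority 5
 * on a connection with sent == 4, and one at priority 7 with
 * sent == 20. The priority 7 channel is picked despite having more
 * data in flight; the sent counter only breaks ties between channels
 * sharing the highest observed priority.
 */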

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
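
/* Worked example of the block accounting (illustrative numbers): with
 * hdev->block_len == 64 and a 259 byte skb (4 byte ACL header plus
 * 255 bytes of payload), DIV_ROUND_UP(255, 64) charges 4 data blocks
 * against hdev->block_cnt for this packet.
 */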

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
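
/* Background note on the two flow control modes (context, not from
 * this file): packet-based accounting is the BR/EDR default, where
 * the controller advertises a buffer count via HCI Read Buffer Size;
 * block-based accounting comes from the AMP extensions, where HCI
 * Read Data Block Size reports a pool of fixed-size blocks instead.
 */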

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
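
/* Worked example of the LE buffer fallback above (illustrative): a
 * controller that returned le_pkts == 0 from LE Read Buffer Size has
 * no dedicated LE pool, so LE traffic borrows ACL credits. With
 * hdev->acl_cnt == 10 and three LE frames sent in one pass, the loop
 * leaves cnt == 7 and writes it back to hdev->acl_cnt.
 */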

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
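
/* Worked example of the request framing above (illustrative): a
 * request built from three commands A, B and C sits in hdev->cmd_q
 * with bt_cb(A)->req.start set. If B completes with a failure
 * status, the flush loop dequeues and frees C, picks up the complete
 * callback stored on the request's last command, stops at the next
 * req.start marker, and runs the callback exactly once with that
 * failure status.
 */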

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
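
/* Illustrative sketch, not part of the original file: the minimal
 * pattern for using the request helper above. The function name is
 * hypothetical; real callers usually chain several hci_req_add()
 * calls before running the request.
 */
static int example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	/* Queues the commands and returns immediately; a completion
	 * callback could be passed instead of NULL.
	 */
	return hci_req_run(&req, NULL);
}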

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If the controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		u8 own_addr_type;

		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is already scanning we are done. */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan
		 * and connect at the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* Set require_privacy to true to avoid identification from
		 * unknown peer devices. Since this is passive scanning, no
		 * SCAN_REQ using the local identity should be sent. Mandating
		 * privacy is just an extra precaution.
		 */
		if (hci_update_random_address(&req, true, &own_addr_type))
			return;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_PASSIVE;
		param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
		param_cp.window = cpu_to_le16(hdev->le_scan_window);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}