/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
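
/* Example usage from user space (a sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and the controller is hci0):
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * Writing Y issues HCI_OP_ENABLE_DUT_MODE; writing N resets the
 * controller via HCI_OP_RESET to leave Device Under Test mode.
 */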

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* The three valid bits select the LE advertising channels:
	 * 0x01 = channel 37, 0x02 = channel 38, 0x04 = channel 39.
	 */
	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
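
/* Example (a sketch, not taken from this file): a driver can use
 * __hci_cmd_sync() to issue a command and wait for its Command Complete
 * event. The 0xfc0f opcode and "param" payload below are purely
 * illustrative vendor-specific values.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... inspect skb->data (the command complete parameters) ...
 *	kfree_skb(skb);
 */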

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
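
/* Example (a sketch): callers hand hci_req_sync() a small builder
 * function that queues whatever commands the request needs. The
 * "scan_req" name here is illustrative; the opcode and constants are
 * existing HCI definitions.
 *
 *	static void scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	hci_req_sync(hdev, scan_req, SCAN_PAGE, HCI_CMD_TIMEOUT);
 */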

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001559static void hci_init4_req(struct hci_request *req, unsigned long opt)
1560{
1561 struct hci_dev *hdev = req->hdev;
1562
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001563 /* Set event mask page 2 if the HCI command for it is supported */
1564 if (hdev->commands[22] & 0x04)
1565 hci_set_event_mask_page_2(req);
1566
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001567 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001568 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001569 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001570
1571 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001572 if ((lmp_sc_capable(hdev) ||
1573 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001574 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1575 u8 support = 0x01;
1576 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1577 sizeof(support), &support);
1578 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001579}
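
/* HCI_FORCE_SC above is a testing override rather than a controller
 * feature: it is toggled through the force_sc_support debugfs entry
 * created in __hci_init() below and allows Secure Connections to be
 * enabled on controllers that do not advertise lmp_sc_capable support.
 */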

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
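
/* Controller bring-up is therefore staged: hci_init1_req performs the
 * reset and basic capability discovery shared by all controller types,
 * hci_init2_req and hci_init3_req configure BR/EDR and LE specifics,
 * and hci_init4_req covers features that depend on earlier responses
 * (second event mask page, synchronization trains, secure connections).
 * AMP controllers stop after the first stage.
 */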

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
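
/* The four small request builders above are not called directly; they
 * are handed to hci_req_sync() by the HCISETSCAN, HCISETAUTH,
 * HCISETENCRYPT and HCISETLINKPOL handlers in hci_dev_cmd() below,
 * with the ioctl argument passed through the opt parameter.
 */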

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
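
/* Each successful hci_dev_get() takes a reference on the device via
 * hci_dev_hold() and must be balanced with hci_dev_put() when the
 * caller is done, as the ioctl helpers below do on their exit paths.
 */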

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
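
/* A discovery cycle normally moves STOPPED -> STARTING -> FINDING ->
 * (optionally RESOLVING ->) STOPPING -> STOPPED. Note that mgmt is
 * only notified on the transition into FINDING and on the final return
 * to STOPPED, so the intermediate states stay invisible to userspace.
 */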

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
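
/* The resolve list is kept ordered by ascending abs(rssi), i.e. the
 * strongest entries first, so that remote names are resolved for the
 * closest devices first. Entries whose name request is already
 * NAME_PENDING keep their current position.
 */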

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
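
/* The return value reports whether the remote name is known after the
 * update: false means the entry is still in NAME_NOT_KNOWN state (or
 * could not be allocated), so the caller may still need to issue a
 * remote name request before reporting the device.
 */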

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
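
/* A note on the timeout above: ir.length counts inquiry intervals of
 * 1.28 seconds each, so budgeting 2000 ms per interval leaves headroom
 * for the controller to finish before hci_req_sync() gives up.
 */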

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

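	/* For the two MTU ioctls below, dev_opt packs two values into a
	 * single 32-bit word that is read back as consecutive
	 * native-endian __u16 halves: on little-endian systems the low
	 * half carries the packet count and the high half the MTU.
	 */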
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}
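
/* In short: legacy keys are always stored and debug keys never are.
 * For the remaining key types, persistence follows the negotiated
 * authentication requirements (0x00/0x01 no-bonding, 0x02/0x03
 * dedicated bonding): the key is kept when neither side asked for
 * no-bonding, or when either side required dedicated bonding.
 */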

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}
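
/* SMP keys are stored per role: HCI_SMP_STK and HCI_SMP_LTK are used
 * when the local device is the master of the connection, while their
 * _SLAVE counterparts apply in the slave role. Lookups therefore have
 * to match on the role as well as on the address or EDIV/Rand values.
 */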

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
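
/* Resolving a Resolvable Private Address is a two-pass lookup: first a
 * cheap comparison against the RPA cached in each IRK entry, and only
 * then the AES based smp_irk_matches() check. On a crypto match the
 * RPA is cached in the entry so the next lookup takes the fast path.
 */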

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}
2858
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002859int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002860 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002861{
2862 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302863 u8 old_key_type;
2864 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002865
2866 old_key = hci_find_link_key(hdev, bdaddr);
2867 if (old_key) {
2868 old_key_type = old_key->type;
2869 key = old_key;
2870 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002871 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002872 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002873 if (!key)
2874 return -ENOMEM;
2875 list_add(&key->list, &hdev->link_keys);
2876 }
2877
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002878 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002879
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002880 /* Some buggy controller combinations generate a changed
2881 * combination key for legacy pairing even when there's no
2882 * previous key */
2883 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002884 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002885 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002886 if (conn)
2887 conn->key_type = type;
2888 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002889
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002890 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002891 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002892 key->pin_len = pin_len;
2893
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002894 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002895 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002896 else
2897 key->type = type;
2898
Johan Hedberg4df378a2011-04-28 11:29:03 -07002899 if (!new_key)
2900 return 0;
2901
2902 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2903
Johan Hedberg744cf192011-11-08 20:40:14 +02002904 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002905
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302906 if (conn)
2907 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002908
2909 return 0;
2910}
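/* Illustrative sketch (assumption): storing a combination key as it
 * would arrive in a Link Key Notification event. The zeroed key value
 * and pin_len are dummy placeholders.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr)
{
	u8 val[HCI_LINK_KEY_SIZE] = { 0 };	/* dummy key material */

	if (hci_add_link_key(hdev, conn, 1, bdaddr, val,
			     HCI_LK_COMBINATION, 0) < 0)
		BT_ERR("Failed to store link key");
}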
2911
Johan Hedbergca9142b2014-02-19 14:57:44 +02002912struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002913 u8 addr_type, u8 type, u8 authenticated,
2914 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002915{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002916 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002917 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002918
Johan Hedberg98a0b842014-01-30 19:40:00 -08002919 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002920 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002921 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002922 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002923 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002924 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002925 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002926 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002927 }
2928
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002929 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002930 key->bdaddr_type = addr_type;
2931 memcpy(key->val, tk, sizeof(key->val));
2932 key->authenticated = authenticated;
2933 key->ediv = ediv;
2934 key->enc_size = enc_size;
2935 key->type = type;
2936 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002937
Johan Hedbergca9142b2014-02-19 14:57:44 +02002938 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002939}
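/* Illustrative sketch (assumption): persisting an LTK received during
 * SMP pairing. The key material, EDiv and Rand below are dummy values;
 * a real caller would take them from the SMP key distribution PDUs.
 */
static void example_store_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	u8 tk[16] = { 0 };	/* dummy key value */
	u8 rand[8] = { 0 };	/* dummy random value */

	if (!hci_add_ltk(hdev, bdaddr, ADDR_LE_DEV_RANDOM, HCI_SMP_LTK,
			 1 /* authenticated */, tk, sizeof(tk), 0, rand))
		BT_ERR("Failed to store LTK");
}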
2940
Johan Hedbergca9142b2014-02-19 14:57:44 +02002941struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2942 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002943{
2944 struct smp_irk *irk;
2945
2946 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2947 if (!irk) {
2948 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2949 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002950 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002951
2952 bacpy(&irk->bdaddr, bdaddr);
2953 irk->addr_type = addr_type;
2954
2955 list_add(&irk->list, &hdev->identity_resolving_keys);
2956 }
2957
2958 memcpy(irk->val, val, 16);
2959 bacpy(&irk->rpa, rpa);
2960
Johan Hedbergca9142b2014-02-19 14:57:44 +02002961 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002962}
2963
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002964int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2965{
2966 struct link_key *key;
2967
2968 key = hci_find_link_key(hdev, bdaddr);
2969 if (!key)
2970 return -ENOENT;
2971
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002972 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002973
2974 list_del(&key->list);
2975 kfree(key);
2976
2977 return 0;
2978}
2979
Johan Hedberge0b2b272014-02-18 17:14:31 +02002980int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002981{
2982 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002983 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002984
2985 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002986 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002987 continue;
2988
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002989 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002990
2991 list_del(&k->list);
2992 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002993 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002994 }
2995
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002996 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002997}
2998
Johan Hedberga7ec7332014-02-18 17:14:35 +02002999void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3000{
3001 struct smp_irk *k, *tmp;
3002
Johan Hedberg668b7b12014-02-21 16:03:31 +02003003 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003004 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3005 continue;
3006
3007 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3008
3009 list_del(&k->list);
3010 kfree(k);
3011 }
3012}
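/* Illustrative sketch (assumption): dropping all LE key material for a
 * device, e.g. as part of an unpair operation.
 */
static void example_forget_le_device(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	if (hci_remove_ltk(hdev, bdaddr, addr_type) == -ENOENT)
		BT_DBG("No LTKs stored for %pMR", bdaddr);

	hci_remove_irk(hdev, bdaddr, addr_type);
}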
3013
Ville Tervo6bd32322011-02-16 16:32:41 +02003014/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003015static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02003016{
3017 struct hci_dev *hdev = (void *) arg;
3018
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003019 if (hdev->sent_cmd) {
3020 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3021 u16 opcode = __le16_to_cpu(sent->opcode);
3022
3023 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3024 } else {
3025 BT_ERR("%s command tx timeout", hdev->name);
3026 }
3027
Ville Tervo6bd32322011-02-16 16:32:41 +02003028 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003029 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003030}
3031
Szymon Janc2763eda2011-03-22 13:12:22 +01003032struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003033 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003034{
3035 struct oob_data *data;
3036
3037 list_for_each_entry(data, &hdev->remote_oob_data, list)
3038 if (bacmp(bdaddr, &data->bdaddr) == 0)
3039 return data;
3040
3041 return NULL;
3042}
3043
3044int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3045{
3046 struct oob_data *data;
3047
3048 data = hci_find_remote_oob_data(hdev, bdaddr);
3049 if (!data)
3050 return -ENOENT;
3051
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003052 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003053
3054 list_del(&data->list);
3055 kfree(data);
3056
3057 return 0;
3058}
3059
Johan Hedberg35f74982014-02-18 17:14:32 +02003060void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003061{
3062 struct oob_data *data, *n;
3063
3064 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3065 list_del(&data->list);
3066 kfree(data);
3067 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003068}
3069
Marcel Holtmann07988722014-01-10 02:07:29 -08003070int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3071 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003072{
3073 struct oob_data *data;
3074
3075 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003076 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003077 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003078 if (!data)
3079 return -ENOMEM;
3080
3081 bacpy(&data->bdaddr, bdaddr);
3082 list_add(&data->list, &hdev->remote_oob_data);
3083 }
3084
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003085 memcpy(data->hash192, hash, sizeof(data->hash192));
3086 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003087
Marcel Holtmann07988722014-01-10 02:07:29 -08003088 memset(data->hash256, 0, sizeof(data->hash256));
3089 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3090
3091 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3092
3093 return 0;
3094}
3095
3096int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3097 u8 *hash192, u8 *randomizer192,
3098 u8 *hash256, u8 *randomizer256)
3099{
3100 struct oob_data *data;
3101
3102 data = hci_find_remote_oob_data(hdev, bdaddr);
3103 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003104 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003105 if (!data)
3106 return -ENOMEM;
3107
3108 bacpy(&data->bdaddr, bdaddr);
3109 list_add(&data->list, &hdev->remote_oob_data);
3110 }
3111
3112 memcpy(data->hash192, hash192, sizeof(data->hash192));
3113 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3114
3115 memcpy(data->hash256, hash256, sizeof(data->hash256));
3116 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3117
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003118 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003119
3120 return 0;
3121}
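/* Illustrative sketch (assumption): storing legacy (P-192 only) OOB
 * data for a peer; the hash and randomizer buffers are dummy
 * placeholders for values received over the out-of-band channel. The
 * non-ext variant above clears the P-256 values automatically.
 */
static void example_store_oob(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	u8 hash[16] = { 0 };
	u8 randomizer[16] = { 0 };

	if (hci_add_remote_oob_data(hdev, bdaddr, hash, randomizer) < 0)
		BT_ERR("Failed to store OOB data for %pMR", bdaddr);
}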
3122
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003123struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3124 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003125{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003126 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003127
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003128 list_for_each_entry(b, &hdev->blacklist, list) {
3129 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003130 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003131 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003132
3133 return NULL;
3134}
3135
Johan Hedberg35f74982014-02-18 17:14:32 +02003136void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003137{
3138 struct list_head *p, *n;
3139
3140 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003141 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003142
3143 list_del(p);
3144 kfree(b);
3145 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003146}
3147
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003148int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003149{
3150 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003151
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003152 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003153 return -EBADF;
3154
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003155 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003156 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003157
3158 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003159 if (!entry)
3160 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003161
3162 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003163 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003164
3165 list_add(&entry->list, &hdev->blacklist);
3166
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003167 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003168}
3169
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003170int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003171{
3172 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003173
Johan Hedberg35f74982014-02-18 17:14:32 +02003174 if (!bacmp(bdaddr, BDADDR_ANY)) {
3175 hci_blacklist_clear(hdev);
3176 return 0;
3177 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003178
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003179 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003180 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003181 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003182
3183 list_del(&entry->list);
3184 kfree(entry);
3185
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003186 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003187}
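/* Illustrative sketch (assumption): blocking a BR/EDR device and then
 * unblocking it again. BDADDR_BREDR is the address type used for
 * BR/EDR entries.
 */
static void example_toggle_block(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);

	if (err == -EEXIST)
		BT_DBG("%pMR already blocked", bdaddr);

	hci_blacklist_del(hdev, bdaddr, BDADDR_BREDR);
}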
3188
Andre Guedes15819a72014-02-03 13:56:18 -03003189/* This function requires the caller holds hdev->lock */
3190struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3191 bdaddr_t *addr, u8 addr_type)
3192{
3193 struct hci_conn_params *params;
3194
3195 list_for_each_entry(params, &hdev->le_conn_params, list) {
3196 if (bacmp(&params->addr, addr) == 0 &&
3197 params->addr_type == addr_type) {
3198 return params;
3199 }
3200 }
3201
3202 return NULL;
3203}
3204
Andre Guedescef952c2014-02-26 20:21:49 -03003205static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3206{
3207 struct hci_conn *conn;
3208
3209 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3210 if (!conn)
3211 return false;
3212
3213 if (conn->dst_type != type)
3214 return false;
3215
3216 if (conn->state != BT_CONNECTED)
3217 return false;
3218
3219 return true;
3220}
3221
Andre Guedes15819a72014-02-03 13:56:18 -03003222/* This function requires the caller holds hdev->lock */
3223void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003224 u8 auto_connect, u16 conn_min_interval,
3225 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003226{
3227 struct hci_conn_params *params;
3228
3229 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003230 if (params)
3231 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003232
3233 params = kzalloc(sizeof(*params), GFP_KERNEL);
3234 if (!params) {
3235 BT_ERR("Out of memory");
3236 return;
3237 }
3238
3239 bacpy(&params->addr, addr);
3240 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003241
3242 list_add(&params->list, &hdev->le_conn_params);
3243
3244update:
Andre Guedes15819a72014-02-03 13:56:18 -03003245 params->conn_min_interval = conn_min_interval;
3246 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003247 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003248
Andre Guedescef952c2014-02-26 20:21:49 -03003249 switch (auto_connect) {
3250 case HCI_AUTO_CONN_DISABLED:
3251 case HCI_AUTO_CONN_LINK_LOSS:
3252 hci_pend_le_conn_del(hdev, addr, addr_type);
3253 break;
3254 case HCI_AUTO_CONN_ALWAYS:
3255 if (!is_connected(hdev, addr, addr_type))
3256 hci_pend_le_conn_add(hdev, addr, addr_type);
3257 break;
3258 }
Andre Guedes15819a72014-02-03 13:56:18 -03003259
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003260 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3261 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3262 conn_min_interval, conn_max_interval);
Andre Guedes15819a72014-02-03 13:56:18 -03003263}
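/* Illustrative sketch (assumption): registering preferred connection
 * parameters for a peer and letting the core auto-connect to it. The
 * interval values simply mirror the controller defaults, and the lock
 * is taken because hci_conn_params_add() requires hdev->lock held.
 */
static void example_add_conn_params(struct hci_dev *hdev, bdaddr_t *addr)
{
	hci_dev_lock(hdev);
	hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC,
			    HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
	hci_dev_unlock(hdev);
}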
3264
3265/* This function requires the caller holds hdev->lock */
3266void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3267{
3268 struct hci_conn_params *params;
3269
3270 params = hci_conn_params_lookup(hdev, addr, addr_type);
3271 if (!params)
3272 return;
3273
Andre Guedescef952c2014-02-26 20:21:49 -03003274 hci_pend_le_conn_del(hdev, addr, addr_type);
3275
Andre Guedes15819a72014-02-03 13:56:18 -03003276 list_del(&params->list);
3277 kfree(params);
3278
3279 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3280}
3281
3282/* This function requires the caller holds hdev->lock */
3283void hci_conn_params_clear(struct hci_dev *hdev)
3284{
3285 struct hci_conn_params *params, *tmp;
3286
3287 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3288 list_del(&params->list);
3289 kfree(params);
3290 }
3291
3292 BT_DBG("All LE connection parameters were removed");
3293}
3294
Andre Guedes77a77a32014-02-26 20:21:46 -03003295/* This function requires the caller holds hdev->lock */
3296struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3297 bdaddr_t *addr, u8 addr_type)
3298{
3299 struct bdaddr_list *entry;
3300
3301 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3302 if (bacmp(&entry->bdaddr, addr) == 0 &&
3303 entry->bdaddr_type == addr_type)
3304 return entry;
3305 }
3306
3307 return NULL;
3308}
3309
3310/* This function requires the caller holds hdev->lock */
3311void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3312{
3313 struct bdaddr_list *entry;
3314
3315 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3316 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003317 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003318
3319 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3320 if (!entry) {
3321 BT_ERR("Out of memory");
3322 return;
3323 }
3324
3325 bacpy(&entry->bdaddr, addr);
3326 entry->bdaddr_type = addr_type;
3327
3328 list_add(&entry->list, &hdev->pend_le_conns);
3329
3330 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003331
3332done:
3333 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003334}
3335
3336/* This function requires the caller holds hdev->lock */
3337void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3338{
3339 struct bdaddr_list *entry;
3340
3341 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3342 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003343 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003344
3345 list_del(&entry->list);
3346 kfree(entry);
3347
3348 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003349
3350done:
3351 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003352}
3353
3354/* This function requires the caller holds hdev->lock */
3355void hci_pend_le_conns_clear(struct hci_dev *hdev)
3356{
3357 struct bdaddr_list *entry, *tmp;
3358
3359 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3360 list_del(&entry->list);
3361 kfree(entry);
3362 }
3363
3364 BT_DBG("All LE pending connections cleared");
3365}
3366
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003367static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003368{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003369 if (status) {
3370 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003371
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003372 hci_dev_lock(hdev);
3373 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3374 hci_dev_unlock(hdev);
3375 return;
3376 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003377}
3378
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003379static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003380{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003381 /* General inquiry access code (GIAC) */
3382 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3383 struct hci_request req;
3384 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003385 int err;
3386
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003387 if (status) {
3388 BT_ERR("Failed to disable LE scanning: status %d", status);
3389 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003390 }
3391
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003392 switch (hdev->discovery.type) {
3393 case DISCOV_TYPE_LE:
3394 hci_dev_lock(hdev);
3395 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3396 hci_dev_unlock(hdev);
3397 break;
3398
3399 case DISCOV_TYPE_INTERLEAVED:
3400 hci_req_init(&req, hdev);
3401
3402 memset(&cp, 0, sizeof(cp));
3403 memcpy(&cp.lap, lap, sizeof(cp.lap));
3404 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3405 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3406
3407 hci_dev_lock(hdev);
3408
3409 hci_inquiry_cache_flush(hdev);
3410
3411 err = hci_req_run(&req, inquiry_complete);
3412 if (err) {
3413 BT_ERR("Inquiry request failed: err %d", err);
3414 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3415 }
3416
3417 hci_dev_unlock(hdev);
3418 break;
3419 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003420}
3421
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003422static void le_scan_disable_work(struct work_struct *work)
3423{
3424 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003425 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003426 struct hci_request req;
3427 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003428
3429 BT_DBG("%s", hdev->name);
3430
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003431 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003432
Andre Guedesb1efcc22014-02-26 20:21:40 -03003433 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003434
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003435 err = hci_req_run(&req, le_scan_disable_work_complete);
3436 if (err)
3437 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003438}
3439
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003440int hci_update_random_address(struct hci_request *req, bool require_privacy,
3441 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003442{
3443 struct hci_dev *hdev = req->hdev;
3444 int err;
3445
3446 /* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003447 * the current RPA has expired or there is something other than
3448 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003449 */
3450 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003451 int to;
3452
3453 *own_addr_type = ADDR_LE_DEV_RANDOM;
3454
3455 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003456 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003457 return 0;
3458
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003459 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003460 if (err < 0) {
3461 BT_ERR("%s failed to generate new RPA", hdev->name);
3462 return err;
3463 }
3464
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003465 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003466
3467 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3468 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3469
3470 return 0;
3471 }
3472
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003473 /* If privacy is required but a resolvable private address is not
3474 * available, use an unresolvable private address. This is useful
3475 * for active scanning and non-connectable advertising.
3476 */
3477 if (require_privacy) {
3478 bdaddr_t urpa;
3479
3480 get_random_bytes(&urpa, 6);
3481 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3482
3483 *own_addr_type = ADDR_LE_DEV_RANDOM;
3484 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3485 return 0;
3486 }
3487
Johan Hedbergebd3a742014-02-23 19:42:21 +02003488 /* If forcing static address is in use or there is no public
3489 * address, use the static address as the random address (but
3490 * skip the HCI command if the current random address is already
3491 * the static one).
3492 */
3493 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3494 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3495 *own_addr_type = ADDR_LE_DEV_RANDOM;
3496 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3497 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3498 &hdev->static_addr);
3499 return 0;
3500 }
3501
3502 /* Neither privacy nor static address is being used so use a
3503 * public address.
3504 */
3505 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3506
3507 return 0;
3508}
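/* Illustrative sketch (assumption): how a request builder would pick
 * the own-address type, e.g. while setting up scanning or advertising
 * parameters. Here privacy is not strictly required, so a public
 * address remains acceptable.
 */
static int example_pick_own_addr(struct hci_request *req)
{
	u8 own_addr_type;
	int err;

	err = hci_update_random_address(req, false, &own_addr_type);
	if (err < 0)
		return err;

	BT_DBG("using own_addr_type %u", own_addr_type);
	return 0;
}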
3509
David Herrmann9be0dab2012-04-22 14:39:57 +02003510/* Alloc HCI device */
3511struct hci_dev *hci_alloc_dev(void)
3512{
3513 struct hci_dev *hdev;
3514
3515 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3516 if (!hdev)
3517 return NULL;
3518
David Herrmannb1b813d2012-04-22 14:39:58 +02003519 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3520 hdev->esco_type = (ESCO_HV1);
3521 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003522 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3523 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003524 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3525 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003526
David Herrmannb1b813d2012-04-22 14:39:58 +02003527 hdev->sniff_max_interval = 800;
3528 hdev->sniff_min_interval = 80;
3529
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003530 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003531 hdev->le_scan_interval = 0x0060;
3532 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003533 hdev->le_conn_min_interval = 0x0028;
3534 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003535
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003536 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3537
David Herrmannb1b813d2012-04-22 14:39:58 +02003538 mutex_init(&hdev->lock);
3539 mutex_init(&hdev->req_lock);
3540
3541 INIT_LIST_HEAD(&hdev->mgmt_pending);
3542 INIT_LIST_HEAD(&hdev->blacklist);
3543 INIT_LIST_HEAD(&hdev->uuids);
3544 INIT_LIST_HEAD(&hdev->link_keys);
3545 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003546 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003547 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003548 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003549 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003550 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003551
3552 INIT_WORK(&hdev->rx_work, hci_rx_work);
3553 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3554 INIT_WORK(&hdev->tx_work, hci_tx_work);
3555 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003556
David Herrmannb1b813d2012-04-22 14:39:58 +02003557 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3558 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3559 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3560
David Herrmannb1b813d2012-04-22 14:39:58 +02003561 skb_queue_head_init(&hdev->rx_q);
3562 skb_queue_head_init(&hdev->cmd_q);
3563 skb_queue_head_init(&hdev->raw_q);
3564
3565 init_waitqueue_head(&hdev->req_wait_q);
3566
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003567 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003568
David Herrmannb1b813d2012-04-22 14:39:58 +02003569 hci_init_sysfs(hdev);
3570 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003571
3572 return hdev;
3573}
3574EXPORT_SYMBOL(hci_alloc_dev);
3575
3576/* Free HCI device */
3577void hci_free_dev(struct hci_dev *hdev)
3578{
David Herrmann9be0dab2012-04-22 14:39:57 +02003579 /* will free via device release */
3580 put_device(&hdev->dev);
3581}
3582EXPORT_SYMBOL(hci_free_dev);
3583
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584/* Register HCI device */
3585int hci_register_dev(struct hci_dev *hdev)
3586{
David Herrmannb1b813d2012-04-22 14:39:58 +02003587 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588
David Herrmann010666a2012-01-07 15:47:07 +01003589 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 return -EINVAL;
3591
Mat Martineau08add512011-11-02 16:18:36 -07003592 /* Do not allow HCI_AMP devices to register at index 0,
3593 * so the index can be used as the AMP controller ID.
3594 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003595 switch (hdev->dev_type) {
3596 case HCI_BREDR:
3597 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3598 break;
3599 case HCI_AMP:
3600 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3601 break;
3602 default:
3603 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003605
Sasha Levin3df92b32012-05-27 22:36:56 +02003606 if (id < 0)
3607 return id;
3608
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 sprintf(hdev->name, "hci%d", id);
3610 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003611
3612 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3613
Kees Cookd8537542013-07-03 15:04:57 -07003614 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3615 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003616 if (!hdev->workqueue) {
3617 error = -ENOMEM;
3618 goto err;
3619 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003620
Kees Cookd8537542013-07-03 15:04:57 -07003621 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3622 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003623 if (!hdev->req_workqueue) {
3624 destroy_workqueue(hdev->workqueue);
3625 error = -ENOMEM;
3626 goto err;
3627 }
3628
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003629 if (!IS_ERR_OR_NULL(bt_debugfs))
3630 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3631
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003632 dev_set_name(&hdev->dev, "%s", hdev->name);
3633
Johan Hedberg99780a72014-02-18 10:40:07 +02003634 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3635 CRYPTO_ALG_ASYNC);
3636 if (IS_ERR(hdev->tfm_aes)) {
3637 BT_ERR("Unable to create crypto context");
3638 error = PTR_ERR(hdev->tfm_aes);
3639 hdev->tfm_aes = NULL;
3640 goto err_wqueue;
3641 }
3642
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003643 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003644 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003645 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003647 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003648 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3649 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003650 if (hdev->rfkill) {
3651 if (rfkill_register(hdev->rfkill) < 0) {
3652 rfkill_destroy(hdev->rfkill);
3653 hdev->rfkill = NULL;
3654 }
3655 }
3656
Johan Hedberg5e130362013-09-13 08:58:17 +03003657 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3658 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3659
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003660 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003661 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003662
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003663 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003664 /* Assume BR/EDR support until proven otherwise (such as
3665 * through reading supported features during init).
3666 */
3667 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3668 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003669
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003670 write_lock(&hci_dev_list_lock);
3671 list_add(&hdev->list, &hci_dev_list);
3672 write_unlock(&hci_dev_list_lock);
3673
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003675 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676
Johan Hedberg19202572013-01-14 22:33:51 +02003677 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003678
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003680
Johan Hedberg99780a72014-02-18 10:40:07 +02003681err_tfm:
3682 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003683err_wqueue:
3684 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003685 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003686err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003687 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003688
David Herrmann33ca9542011-10-08 14:58:49 +02003689 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690}
3691EXPORT_SYMBOL(hci_register_dev);
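/* Illustrative sketch (assumption): the usual driver-side sequence for
 * bringing up a new controller. The example_* callbacks are
 * hypothetical stand-ins for a real transport driver's hooks.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would hand this to hardware */
	return 0;
}

static int example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}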
3692
3693/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003694void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695{
Sasha Levin3df92b32012-05-27 22:36:56 +02003696 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003697
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003698 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699
Johan Hovold94324962012-03-15 14:48:41 +01003700 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3701
Sasha Levin3df92b32012-05-27 22:36:56 +02003702 id = hdev->id;
3703
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003704 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003706 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707
3708 hci_dev_do_close(hdev);
3709
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303710 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003711 kfree_skb(hdev->reassembly[i]);
3712
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003713 cancel_work_sync(&hdev->power_on);
3714
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003715 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003716 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003717 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003718 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003719 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003720 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003721
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003722 /* mgmt_index_removed should take care of emptying the
3723 * pending list */
3724 BUG_ON(!list_empty(&hdev->mgmt_pending));
3725
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726 hci_notify(hdev, HCI_DEV_UNREG);
3727
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003728 if (hdev->rfkill) {
3729 rfkill_unregister(hdev->rfkill);
3730 rfkill_destroy(hdev->rfkill);
3731 }
3732
Johan Hedberg99780a72014-02-18 10:40:07 +02003733 if (hdev->tfm_aes)
3734 crypto_free_blkcipher(hdev->tfm_aes);
3735
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003736 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003737
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003738 debugfs_remove_recursive(hdev->debugfs);
3739
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003740 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003741 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003742
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003743 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003744 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003745 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003746 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003747 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003748 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003749 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003750 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003751 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003752 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003753
David Herrmanndc946bd2012-01-07 15:47:24 +01003754 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003755
3756 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757}
3758EXPORT_SYMBOL(hci_unregister_dev);
3759
3760/* Suspend HCI device */
3761int hci_suspend_dev(struct hci_dev *hdev)
3762{
3763 hci_notify(hdev, HCI_DEV_SUSPEND);
3764 return 0;
3765}
3766EXPORT_SYMBOL(hci_suspend_dev);
3767
3768/* Resume HCI device */
3769int hci_resume_dev(struct hci_dev *hdev)
3770{
3771 hci_notify(hdev, HCI_DEV_RESUME);
3772 return 0;
3773}
3774EXPORT_SYMBOL(hci_resume_dev);
3775
Marcel Holtmann76bca882009-11-18 00:40:39 +01003776/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003777int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003778{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003779 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003780 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003781 kfree_skb(skb);
3782 return -ENXIO;
3783 }
3784
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003785 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003786 bt_cb(skb)->incoming = 1;
3787
3788 /* Time stamp */
3789 __net_timestamp(skb);
3790
Marcel Holtmann76bca882009-11-18 00:40:39 +01003791 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003792 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003793
Marcel Holtmann76bca882009-11-18 00:40:39 +01003794 return 0;
3795}
3796EXPORT_SYMBOL(hci_recv_frame);
3797
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303798static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003799 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303800{
3801 int len = 0;
3802 int hlen = 0;
3803 int remain = count;
3804 struct sk_buff *skb;
3805 struct bt_skb_cb *scb;
3806
3807 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003808 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303809 return -EILSEQ;
3810
3811 skb = hdev->reassembly[index];
3812
3813 if (!skb) {
3814 switch (type) {
3815 case HCI_ACLDATA_PKT:
3816 len = HCI_MAX_FRAME_SIZE;
3817 hlen = HCI_ACL_HDR_SIZE;
3818 break;
3819 case HCI_EVENT_PKT:
3820 len = HCI_MAX_EVENT_SIZE;
3821 hlen = HCI_EVENT_HDR_SIZE;
3822 break;
3823 case HCI_SCODATA_PKT:
3824 len = HCI_MAX_SCO_SIZE;
3825 hlen = HCI_SCO_HDR_SIZE;
3826 break;
3827 }
3828
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003829 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303830 if (!skb)
3831 return -ENOMEM;
3832
3833 scb = (void *) skb->cb;
3834 scb->expect = hlen;
3835 scb->pkt_type = type;
3836
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303837 hdev->reassembly[index] = skb;
3838 }
3839
3840 while (count) {
3841 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003842 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303843
3844 memcpy(skb_put(skb, len), data, len);
3845
3846 count -= len;
3847 data += len;
3848 scb->expect -= len;
3849 remain = count;
3850
3851 switch (type) {
3852 case HCI_EVENT_PKT:
3853 if (skb->len == HCI_EVENT_HDR_SIZE) {
3854 struct hci_event_hdr *h = hci_event_hdr(skb);
3855 scb->expect = h->plen;
3856
3857 if (skb_tailroom(skb) < scb->expect) {
3858 kfree_skb(skb);
3859 hdev->reassembly[index] = NULL;
3860 return -ENOMEM;
3861 }
3862 }
3863 break;
3864
3865 case HCI_ACLDATA_PKT:
3866 if (skb->len == HCI_ACL_HDR_SIZE) {
3867 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3868 scb->expect = __le16_to_cpu(h->dlen);
3869
3870 if (skb_tailroom(skb) < scb->expect) {
3871 kfree_skb(skb);
3872 hdev->reassembly[index] = NULL;
3873 return -ENOMEM;
3874 }
3875 }
3876 break;
3877
3878 case HCI_SCODATA_PKT:
3879 if (skb->len == HCI_SCO_HDR_SIZE) {
3880 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3881 scb->expect = h->dlen;
3882
3883 if (skb_tailroom(skb) < scb->expect) {
3884 kfree_skb(skb);
3885 hdev->reassembly[index] = NULL;
3886 return -ENOMEM;
3887 }
3888 }
3889 break;
3890 }
3891
3892 if (scb->expect == 0) {
3893 /* Complete frame */
3894
3895 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003896 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303897
3898 hdev->reassembly[index] = NULL;
3899 return remain;
3900 }
3901 }
3902
3903 return remain;
3904}
3905
Marcel Holtmannef222012007-07-11 06:42:04 +02003906int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3907{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303908 int rem = 0;
3909
Marcel Holtmannef222012007-07-11 06:42:04 +02003910 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3911 return -EILSEQ;
3912
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003913 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003914 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303915 if (rem < 0)
3916 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003917
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303918 data += (count - rem);
3919 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003920 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003921
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303922 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003923}
3924EXPORT_SYMBOL(hci_recv_fragment);
3925
Suraj Sumangala99811512010-07-14 13:02:19 +05303926#define STREAM_REASSEMBLY 0
3927
3928int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3929{
3930 int type;
3931 int rem = 0;
3932
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003933 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303934 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3935
3936 if (!skb) {
3937 struct { char type; } *pkt;
3938
3939 /* Start of the frame */
3940 pkt = data;
3941 type = pkt->type;
3942
3943 data++;
3944 count--;
3945 } else
3946 type = bt_cb(skb)->pkt_type;
3947
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003948 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003949 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303950 if (rem < 0)
3951 return rem;
3952
3953 data += (count - rem);
3954 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003955 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303956
3957 return rem;
3958}
3959EXPORT_SYMBOL(hci_recv_stream_fragment);
3960
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961/* ---- Interface to upper protocols ---- */
3962
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963int hci_register_cb(struct hci_cb *cb)
3964{
3965 BT_DBG("%p name %s", cb, cb->name);
3966
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003967 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003969 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970
3971 return 0;
3972}
3973EXPORT_SYMBOL(hci_register_cb);
3974
3975int hci_unregister_cb(struct hci_cb *cb)
3976{
3977 BT_DBG("%p name %s", cb, cb->name);
3978
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003979 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003981 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982
3983 return 0;
3984}
3985EXPORT_SYMBOL(hci_unregister_cb);
3986
Marcel Holtmann51086992013-10-10 14:54:19 -07003987static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003989 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003991 /* Time stamp */
3992 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003994 /* Send copy to monitor */
3995 hci_send_to_monitor(hdev, skb);
3996
3997 if (atomic_read(&hdev->promisc)) {
3998 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003999 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 }
4001
4002 /* Get rid of skb owner, prior to sending to the driver. */
4003 skb_orphan(skb);
4004
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004005 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004006 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007}
4008
Johan Hedberg3119ae92013-03-05 20:37:44 +02004009void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4010{
4011 skb_queue_head_init(&req->cmd_q);
4012 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004013 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004014}
4015
4016int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4017{
4018 struct hci_dev *hdev = req->hdev;
4019 struct sk_buff *skb;
4020 unsigned long flags;
4021
4022 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4023
Andre Guedes5d73e032013-03-08 11:20:16 -03004024 /* If an error occurred during request building, remove all HCI
4025 * commands queued on the HCI request queue.
4026 */
4027 if (req->err) {
4028 skb_queue_purge(&req->cmd_q);
4029 return req->err;
4030 }
4031
Johan Hedberg3119ae92013-03-05 20:37:44 +02004032 /* Do not allow empty requests */
4033 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004034 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004035
4036 skb = skb_peek_tail(&req->cmd_q);
4037 bt_cb(skb)->req.complete = complete;
4038
4039 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4040 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4041 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4042
4043 queue_work(hdev->workqueue, &hdev->cmd_work);
4044
4045 return 0;
4046}
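/* Illustrative sketch (assumption): building and running a small
 * asynchronous request, in the same pattern le_scan_disable_work()
 * uses above. The completion callback and the link policy value are
 * examples, not taken from the original file.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status %u", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__le16 policy = cpu_to_le16(0x0005);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
		    &policy);

	return hci_req_run(&req, example_req_complete);
}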
4047
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004048static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004049 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050{
4051 int len = HCI_COMMAND_HDR_SIZE + plen;
4052 struct hci_command_hdr *hdr;
4053 struct sk_buff *skb;
4054
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004056 if (!skb)
4057 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
4059 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004060 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061 hdr->plen = plen;
4062
4063 if (plen)
4064 memcpy(skb_put(skb, plen), param, plen);
4065
4066 BT_DBG("skb len %d", skb->len);
4067
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004068 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004069
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004070 return skb;
4071}
4072
4073/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004074int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4075 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004076{
4077 struct sk_buff *skb;
4078
4079 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4080
4081 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4082 if (!skb) {
4083 BT_ERR("%s no memory for command", hdev->name);
4084 return -ENOMEM;
4085 }
4086
Johan Hedberg11714b32013-03-05 20:37:47 +02004087 /* Stand-alone HCI commands must be flagged as
4088 * single-command requests.
4089 */
4090 bt_cb(skb)->req.start = true;
4091
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004093 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094
4095 return 0;
4096}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097
Johan Hedberg71c76a12013-03-05 20:37:46 +02004098/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004099void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4100 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004101{
4102 struct hci_dev *hdev = req->hdev;
4103 struct sk_buff *skb;
4104
4105 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4106
Andre Guedes34739c12013-03-08 11:20:18 -03004107 /* If an error occurred during request building, there is no point in
4108 * queueing the HCI command. We can simply return.
4109 */
4110 if (req->err)
4111 return;
4112
Johan Hedberg71c76a12013-03-05 20:37:46 +02004113 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4114 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004115 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4116 hdev->name, opcode);
4117 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004118 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004119 }
4120
4121 if (skb_queue_empty(&req->cmd_q))
4122 bt_cb(skb)->req.start = true;
4123
Johan Hedberg02350a72013-04-03 21:50:29 +03004124 bt_cb(skb)->req.event = event;
4125
Johan Hedberg71c76a12013-03-05 20:37:46 +02004126 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004127}
4128
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004129void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4130 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004131{
4132 hci_req_add_ev(req, opcode, plen, param, 0);
4133}
4134
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004136void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137{
4138 struct hci_command_hdr *hdr;
4139
4140 if (!hdev->sent_cmd)
4141 return NULL;
4142
4143 hdr = (void *) hdev->sent_cmd->data;
4144
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004145 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 return NULL;
4147
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004148 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149
4150 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4151}
4152
4153/* Send ACL data */
4154static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4155{
4156 struct hci_acl_hdr *hdr;
4157 int len = skb->len;
4158
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004159 skb_push(skb, HCI_ACL_HDR_SIZE);
4160 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004161 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004162 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4163 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164}
4165
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004166static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004167 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004169 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 struct hci_dev *hdev = conn->hdev;
4171 struct sk_buff *list;
4172
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004173 skb->len = skb_headlen(skb);
4174 skb->data_len = 0;
4175
4176 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004177
4178 switch (hdev->dev_type) {
4179 case HCI_BREDR:
4180 hci_add_acl_hdr(skb, conn->handle, flags);
4181 break;
4182 case HCI_AMP:
4183 hci_add_acl_hdr(skb, chan->handle, flags);
4184 break;
4185 default:
4186 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4187 return;
4188 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004189
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004190 list = skb_shinfo(skb)->frag_list;
4191 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 /* Non-fragmented */
4193 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4194
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004195 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 } else {
4197 /* Fragmented */
4198 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4199
4200 skb_shinfo(skb)->frag_list = NULL;
4201
4202 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004203 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004205 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004206
4207 flags &= ~ACL_START;
4208 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 do {
4210 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004211
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004212 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004213 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214
4215 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4216
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004217 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 } while (list);
4219
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004220 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004222}
4223
4224void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4225{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004226 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004227
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004228 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004229
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004230 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004232 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
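
/* For reference, the three byte header prepended above is just the
 * little-endian connection handle followed by the payload length; the
 * authoritative definition lives in include/net/bluetooth/hci.h:
 *
 *	struct hci_sco_hdr {
 *		__le16	handle;
 *		__u8	dlen;
 *	} __packed;
 */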

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
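
/* The quote returned above is a simple fair share: the controller credits
 * available for this link type divided by the number of connections with
 * queued data, floored at one so the selected connection can always make
 * progress. Example with made-up numbers: eight free ACL credits and three
 * busy ACL connections give the least-busy connection a quote of
 * 8 / 3 == 2 packets for this scheduling round.
 */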

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
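
/* Channel selection is two-dimensional: the scan only keeps channels whose
 * head-of-queue skb carries the highest priority seen so far (num and min
 * are reset whenever a higher priority shows up), and among those it picks
 * the channel whose connection has the fewest packets in flight. So, for
 * example, a channel at priority 6 preempts any number of priority 5
 * channels, while two priority 5 channels are ordered by conn->sent.
 */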

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
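
/* This promotion pass is the anti-starvation half of the scheduler: any
 * channel that went a whole round without transmitting (chan->sent == 0)
 * gets its head skb bumped to HCI_PRIO_MAX - 1 so the priority check in
 * hci_chan_sent() can no longer shut it out, while channels that did send
 * simply have their counters reset for the next round.
 */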

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
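
/* Worked example with made-up numbers: with hdev->block_len == 64, a frame
 * of 200 bytes carries 200 - HCI_ACL_HDR_SIZE == 196 bytes of payload, so
 * DIV_ROUND_UP(196, 64) charges 4 blocks against hdev->block_cnt.
 */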

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4580
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004581static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004582{
4583 BT_DBG("%s", hdev->name);
4584
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004585 /* No ACL link over BR/EDR controller */
4586 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4587 return;
4588
4589 /* No AMP link over AMP controller */
4590 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004591 return;
4592
4593 switch (hdev->flow_ctl_mode) {
4594 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4595 hci_sched_acl_pkt(hdev);
4596 break;
4597
4598 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4599 hci_sched_acl_blk(hdev);
4600 break;
4601 }
4602}
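
/* Packet-based flow control is the classic scheme where the controller
 * reports credits per frame via Number Of Completed Packets events, while
 * block-based flow control (used with AMP controllers) charges each frame
 * a number of fixed-size buffer blocks via __get_blocks() and reports them
 * back through Number Of Completed Data Blocks; the dispatch above simply
 * follows whichever mode the controller advertised in hdev->flow_ctl_mode.
 */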

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
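
/* Controllers without a dedicated LE buffer pool advertise le_pkts == 0;
 * in that case the loop above borrows credits from the ACL pool, which is
 * why the leftover count is written back to either hdev->le_cnt or
 * hdev->acl_cnt depending on hdev->le_pkts.
 */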

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
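
/* The first 16 bits of an ACL data packet pack a 12 bit connection handle
 * together with the 2 bit packet-boundary and broadcast flags, which is
 * what the hci_handle()/hci_flags() helpers used above unpack:
 *
 *	#define hci_handle(h)	((h) & 0x0fff)
 *	#define hci_flags(h)	((h) >> 12)
 */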

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
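
/* Requests are delimited in hdev->cmd_q by bt_cb(skb)->req.start: only the
 * first command of each request carries start == true, which is what both
 * hci_req_is_complete() and the flush loop above key off. Illustrative
 * example: with the queue holding | A1 A2 | B1 |, request A completes only
 * once A2's event arrives, and B1 is left untouched at the head.
 */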

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
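
/* hdev->cmd_cnt mirrors the Num_HCI_Command_Packets credit reported by the
 * controller in Command Complete/Command Status events, so in practice at
 * most one command is usually in flight; cloning into hdev->sent_cmd keeps
 * the original around for hci_sent_cmd_data() and for a possible resend.
 */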

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
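
/* A minimal sketch of how such a request helper is consumed (illustrative
 * only; the completion callback name is made up and error handling is
 * elided):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_run(&req, scan_disable_complete);
 */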

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		u8 own_addr_type;

		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is already scanning we are done. */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* Set require_privacy to true to avoid identification from
		 * unknown peer devices. Since this is passive scanning, no
		 * SCAN_REQ using the local identity should be sent. Mandating
		 * privacy is just an extra precaution.
		 */
		if (hci_update_random_address(&req, true, &own_addr_type))
			return;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_PASSIVE;
		param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
		param_cp.window = cpu_to_le16(hdev->le_scan_window);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
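
/* The request built above batches LE_SET_SCAN_PARAM and LE_SET_SCAN_ENABLE
 * so both commands travel through hdev->cmd_q as a single unit, and
 * update_background_scan_complete() fires exactly once, after the last
 * command in the request completes (see hci_req_cmd_complete() above).
 */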