/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

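/* Forward an HCI device event (e.g. HCI_DEV_REG, HCI_DEV_UP) to the
 * HCI socket layer so that monitoring sockets are notified.
 */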
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

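/* Illustrative use of the dut_mode entry from userspace (a sketch,
 * assuming debugfs is mounted at /sys/kernel/debug and the controller
 * is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing toggles Device Under Test mode via the HCI commands issued
 * in dut_mode_write() above; reading reports the current flag.
 */
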
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

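/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a
 * debugfs file backed by the u64 get/set helpers passed to it, using
 * the given printf format for reads. The numeric attributes below all
 * follow this pattern.
 */
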
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = kzalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

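/* Sketch of the le_auto_conn write syntax parsed above (hypothetical
 * address; assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo "add 00:11:22:33:44:55 1 2" > .../bluetooth/hci0/le_auto_conn
 *	echo "del 00:11:22:33:44:55 1" > .../bluetooth/hci0/le_auto_conn
 *	echo "clr" > .../bluetooth/hci0/le_auto_conn
 *
 * The fields after the address are the address type and, for "add",
 * the auto_connect value handed to hci_conn_params_add().
 */
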
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

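/* Hand back the last received event (hdev->recv_evt) if it matches the
 * given event code, or the Command Complete event for the given opcode.
 * Consumes the skb; returns ERR_PTR(-ENODATA) when nothing suitable is
 * pending.
 */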
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

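/* A minimal usage sketch for __hci_cmd_sync() (hypothetical vendor
 * opcode; callers are expected to hold hci_req_lock(), as the debugfs
 * helpers above do):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... inspect skb->data (the command's return parameters) ...
 *	kfree_skb(skb);
 */
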
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

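/* Like __hci_req_sync(), but takes hci_req_lock() itself so that
 * requests from different callers are serialized. __hci_req_sync()
 * assumes the caller already holds the lock.
 */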
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

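/* Pick the value for the Write Inquiry Mode command: 0x02 = inquiry
 * result with extended inquiry result, 0x01 = inquiry result with
 * RSSI, 0x00 = standard inquiry result. The manufacturer/revision
 * checks cover controllers that support the RSSI format without
 * advertising the corresponding feature bit.
 */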
1370static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1371{
1372 if (lmp_ext_inq_capable(hdev))
1373 return 0x02;
1374
1375 if (lmp_inq_rssi_capable(hdev))
1376 return 0x01;
1377
1378 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1379 hdev->lmp_subver == 0x0757)
1380 return 0x01;
1381
1382 if (hdev->manufacturer == 15) {
1383 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1384 return 0x01;
1385 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1386 return 0x01;
1387 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1388 return 0x01;
1389 }
1390
1391 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1392 hdev->lmp_subver == 0x1805)
1393 return 0x01;
1394
1395 return 0x00;
1396}
1397
Johan Hedberg42c6b122013-03-05 20:37:49 +02001398static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001399{
1400 u8 mode;
1401
Johan Hedberg42c6b122013-03-05 20:37:49 +02001402 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001403
Johan Hedberg42c6b122013-03-05 20:37:49 +02001404 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001405}
1406
Johan Hedberg42c6b122013-03-05 20:37:49 +02001407static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001408{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001409 struct hci_dev *hdev = req->hdev;
1410
Johan Hedberg2177bab2013-03-05 20:37:43 +02001411 /* The second byte is 0xff instead of 0x9f (two reserved bits
1412 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1413 * command otherwise.
1414 */
1415 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1416
1417 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1418 * any event mask for pre 1.2 devices.
1419 */
1420 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1421 return;
1422
1423 if (lmp_bredr_capable(hdev)) {
1424 events[4] |= 0x01; /* Flow Specification Complete */
1425 events[4] |= 0x02; /* Inquiry Result with RSSI */
1426 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1427 events[5] |= 0x08; /* Synchronous Connection Complete */
1428 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001429 } else {
1430 /* Use a different default for LE-only devices */
1431 memset(events, 0, sizeof(events));
1432 events[0] |= 0x10; /* Disconnection Complete */
1433 events[0] |= 0x80; /* Encryption Change */
1434 events[1] |= 0x08; /* Read Remote Version Information Complete */
1435 events[1] |= 0x20; /* Command Complete */
1436 events[1] |= 0x40; /* Command Status */
1437 events[1] |= 0x80; /* Hardware Error */
1438 events[2] |= 0x04; /* Number of Completed Packets */
1439 events[3] |= 0x02; /* Data Buffer Overflow */
1440 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001441 }
1442
1443 if (lmp_inq_rssi_capable(hdev))
1444 events[4] |= 0x02; /* Inquiry Result with RSSI */
1445
1446 if (lmp_sniffsubr_capable(hdev))
1447 events[5] |= 0x20; /* Sniff Subrating */
1448
1449 if (lmp_pause_enc_capable(hdev))
1450 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1451
1452 if (lmp_ext_inq_capable(hdev))
1453 events[5] |= 0x40; /* Extended Inquiry Result */
1454
1455 if (lmp_no_flush_capable(hdev))
1456 events[7] |= 0x01; /* Enhanced Flush Complete */
1457
1458 if (lmp_lsto_capable(hdev))
1459 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1460
1461 if (lmp_ssp_capable(hdev)) {
1462 events[6] |= 0x01; /* IO Capability Request */
1463 events[6] |= 0x02; /* IO Capability Response */
1464 events[6] |= 0x04; /* User Confirmation Request */
1465 events[6] |= 0x08; /* User Passkey Request */
1466 events[6] |= 0x10; /* Remote OOB Data Request */
1467 events[6] |= 0x20; /* Simple Pairing Complete */
1468 events[7] |= 0x04; /* User Passkey Notification */
1469 events[7] |= 0x08; /* Keypress Notification */
1470 events[7] |= 0x10; /* Remote Host Supported
1471 * Features Notification
1472 */
1473 }
1474
1475 if (lmp_le_capable(hdev))
1476 events[7] |= 0x20; /* LE Meta-Event */
1477
Johan Hedberg42c6b122013-03-05 20:37:49 +02001478 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001479
1480 if (lmp_le_capable(hdev)) {
1481 memset(events, 0, sizeof(events));
1482 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001483 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1484 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001485 }
1486}
1487
Johan Hedberg42c6b122013-03-05 20:37:49 +02001488static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001489{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001490 struct hci_dev *hdev = req->hdev;
1491
Johan Hedberg2177bab2013-03-05 20:37:43 +02001492 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001493 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001494 else
1495 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001496
1497 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001498 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001499
Johan Hedberg42c6b122013-03-05 20:37:49 +02001500 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001501
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001502 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1503 * local supported commands HCI command.
1504 */
1505 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001507
1508 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001509 /* When SSP is available, then the host features page
1510 * should also be available as well. However some
1511 * controllers list the max_page as 0 as long as SSP
1512 * has not been enabled. To achieve proper debugging
1513 * output, force the minimum max_page to 1 at least.
1514 */
1515 hdev->max_page = 0x01;
1516
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1518 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001519 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1520 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001521 } else {
1522 struct hci_cp_write_eir cp;
1523
1524 memset(hdev->eir, 0, sizeof(hdev->eir));
1525 memset(&cp, 0, sizeof(cp));
1526
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528 }
1529 }
1530
1531 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001533
1534 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001536
1537 if (lmp_ext_feat_capable(hdev)) {
1538 struct hci_cp_read_local_ext_features cp;
1539
1540 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001541 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1542 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001543 }
1544
1545 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1546 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001547 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1548 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549 }
1550}
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001553{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001554 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555 struct hci_cp_write_def_link_policy cp;
1556 u16 link_policy = 0;
1557
1558 if (lmp_rswitch_capable(hdev))
1559 link_policy |= HCI_LP_RSWITCH;
1560 if (lmp_hold_capable(hdev))
1561 link_policy |= HCI_LP_HOLD;
1562 if (lmp_sniff_capable(hdev))
1563 link_policy |= HCI_LP_SNIFF;
1564 if (lmp_park_capable(hdev))
1565 link_policy |= HCI_LP_PARK;
1566
1567 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001569}
1570
Johan Hedberg42c6b122013-03-05 20:37:49 +02001571static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001572{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001573 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574 struct hci_cp_write_le_host_supported cp;
1575
Johan Hedbergc73eee92013-04-19 18:35:21 +03001576 /* LE-only devices do not support explicit enablement */
1577 if (!lmp_bredr_capable(hdev))
1578 return;
1579
Johan Hedberg2177bab2013-03-05 20:37:43 +02001580 memset(&cp, 0, sizeof(cp));
1581
1582 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1583 cp.le = 0x01;
1584 cp.simul = lmp_le_br_capable(hdev);
1585 }
1586
1587 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001588 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1589 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590}
1591
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001592static void hci_set_event_mask_page_2(struct hci_request *req)
1593{
1594 struct hci_dev *hdev = req->hdev;
1595 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1596
1597 /* If Connectionless Slave Broadcast master role is supported
1598 * enable all necessary events for it.
1599 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001600 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001601 events[1] |= 0x40; /* Triggered Clock Capture */
1602 events[1] |= 0x80; /* Synchronization Train Complete */
1603 events[2] |= 0x10; /* Slave Page Response Timeout */
1604 events[2] |= 0x20; /* CSB Channel Map Change */
1605 }
1606
1607 /* If Connectionless Slave Broadcast slave role is supported
1608 * enable all necessary events for it.
1609 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001610 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001611 events[2] |= 0x01; /* Synchronization Train Received */
1612 events[2] |= 0x02; /* CSB Receive */
1613 events[2] |= 0x04; /* CSB Timeout */
1614 events[2] |= 0x08; /* Truncated Page Complete */
1615 }
1616
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001617 /* Enable Authenticated Payload Timeout Expired event if supported */
1618 if (lmp_ping_capable(hdev))
1619 events[2] |= 0x80;
1620
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001621 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1622}
1623
Johan Hedberg42c6b122013-03-05 20:37:49 +02001624static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001625{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001626 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001627 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001628
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001629 /* Some Broadcom based Bluetooth controllers do not support the
1630 * Delete Stored Link Key command. They are clearly indicating its
1631 * absence in the bit mask of supported commands.
1632 *
 1633	 * Check the supported commands, and send this one only if it is
 1634	 * marked as supported. If not supported, assume that the controller
 1635	 * does not have actual support for stored link keys, which makes
 1636	 * this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001637	 *
 1638	 * Some controllers indicate that they support deleting stored
 1639	 * link keys, but they don't. The quirk lets a driver just
 1640	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001641 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001642 if (hdev->commands[6] & 0x80 &&
1643 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001644 struct hci_cp_delete_stored_link_key cp;
1645
1646 bacpy(&cp.bdaddr, BDADDR_ANY);
1647 cp.delete_all = 0x01;
1648 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1649 sizeof(cp), &cp);
1650 }
1651
Johan Hedberg2177bab2013-03-05 20:37:43 +02001652 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001653 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001654
Johan Hedberg7bf32042014-02-23 19:42:29 +02001655 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001656 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001657
1658 /* Read features beyond page 1 if available */
1659 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1660 struct hci_cp_read_local_ext_features cp;
1661
1662 cp.page = p;
1663 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1664 sizeof(cp), &cp);
1665 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001666}
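
/* Illustrative sketch (hypothetical helper, not in the original file):
 * the supported-commands bitmap consulted during init is indexed by
 * octet and bit as laid out in the Bluetooth core specification, so the
 * "hdev->commands[6] & 0x80" check above asks for octet 6, bit 7
 * (Delete Stored Link Key).
 */
static bool example_hci_cmd_supported(struct hci_dev *hdev,
				      unsigned int octet, unsigned int bit)
{
	return (hdev->commands[octet] & (1 << bit)) != 0;
}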
1667
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001668static void hci_init4_req(struct hci_request *req, unsigned long opt)
1669{
1670 struct hci_dev *hdev = req->hdev;
1671
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001672 /* Set event mask page 2 if the HCI command for it is supported */
1673 if (hdev->commands[22] & 0x04)
1674 hci_set_event_mask_page_2(req);
1675
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001676 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001677 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001678 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001679
1680 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001681 if ((lmp_sc_capable(hdev) ||
1682 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001683 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1684 u8 support = 0x01;
1685 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1686 sizeof(support), &support);
1687 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001688}
1689
Johan Hedberg2177bab2013-03-05 20:37:43 +02001690static int __hci_init(struct hci_dev *hdev)
1691{
1692 int err;
1693
1694 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1695 if (err < 0)
1696 return err;
1697
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001698 /* The Device Under Test (DUT) mode is special and available for
1699 * all controller types. So just create it early on.
1700 */
1701 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1702 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1703 &dut_mode_fops);
1704 }
1705
Johan Hedberg2177bab2013-03-05 20:37:43 +02001706 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1707 * BR/EDR/LE type controllers. AMP controllers only need the
1708 * first stage init.
1709 */
1710 if (hdev->dev_type != HCI_BREDR)
1711 return 0;
1712
1713 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1714 if (err < 0)
1715 return err;
1716
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001717 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1718 if (err < 0)
1719 return err;
1720
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001721 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1722 if (err < 0)
1723 return err;
1724
1725 /* Only create debugfs entries during the initial setup
1726 * phase and not every time the controller gets powered on.
1727 */
1728 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1729 return 0;
1730
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001731 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1732 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001733 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1734 &hdev->manufacturer);
1735 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1736 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001737 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1738 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001739 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1740
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001741 if (lmp_bredr_capable(hdev)) {
1742 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1743 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001744 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1745 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001746 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1747 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001748 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1749 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001750 }
1751
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001752 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001753 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1754 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001755 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1756 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001757 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1758 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001759 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1760 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001761 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001762
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001763 if (lmp_sniff_capable(hdev)) {
1764 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1765 hdev, &idle_timeout_fops);
1766 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1767 hdev, &sniff_min_interval_fops);
1768 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1769 hdev, &sniff_max_interval_fops);
1770 }
1771
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001772 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001773 debugfs_create_file("identity", 0400, hdev->debugfs,
1774 hdev, &identity_fops);
1775 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1776 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001777 debugfs_create_file("random_address", 0444, hdev->debugfs,
1778 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001779 debugfs_create_file("static_address", 0444, hdev->debugfs,
1780 hdev, &static_address_fops);
1781
1782 /* For controllers with a public address, provide a debug
1783 * option to force the usage of the configured static
1784 * address. By default the public address is used.
1785 */
1786 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1787 debugfs_create_file("force_static_address", 0644,
1788 hdev->debugfs, hdev,
1789 &force_static_address_fops);
1790
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001791 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1792 &hdev->le_white_list_size);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001793 debugfs_create_file("identity_resolving_keys", 0400,
1794 hdev->debugfs, hdev,
1795 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001796 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1797 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001798 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1799 hdev, &conn_min_interval_fops);
1800 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1801 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001802 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1803 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001804 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1805 &lowpan_debugfs_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001806 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1807 &le_auto_conn_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001808 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001809
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001810 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001811}
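
/* Illustrative sketch (hypothetical name and helper, not in the original
 * file): additional debugfs entries follow the same pattern as the ones
 * created above. A plain integer entry needs backing storage that
 * outlives the file, hence the static variable here.
 */
static u8 example_debug_value;

static void example_add_debugfs_entry(struct hci_dev *hdev)
{
	debugfs_create_u8("example_value", 0644, hdev->debugfs,
			  &example_debug_value);
}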
1812
Johan Hedberg42c6b122013-03-05 20:37:49 +02001813static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814{
1815 __u8 scan = opt;
1816
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
1819 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001820 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821}
1822
Johan Hedberg42c6b122013-03-05 20:37:49 +02001823static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824{
1825 __u8 auth = opt;
1826
Johan Hedberg42c6b122013-03-05 20:37:49 +02001827 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
1829 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001830 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831}
1832
Johan Hedberg42c6b122013-03-05 20:37:49 +02001833static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834{
1835 __u8 encrypt = opt;
1836
Johan Hedberg42c6b122013-03-05 20:37:49 +02001837 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001839 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001840 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841}
1842
Johan Hedberg42c6b122013-03-05 20:37:49 +02001843static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001844{
1845 __le16 policy = cpu_to_le16(opt);
1846
Johan Hedberg42c6b122013-03-05 20:37:49 +02001847 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001848
1849 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001850 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001851}
1852
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001853/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 * Device is held on return. */
1855struct hci_dev *hci_dev_get(int index)
1856{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001857 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
1859 BT_DBG("%d", index);
1860
1861 if (index < 0)
1862 return NULL;
1863
1864 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001865 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 if (d->id == index) {
1867 hdev = hci_dev_hold(d);
1868 break;
1869 }
1870 }
1871 read_unlock(&hci_dev_list_lock);
1872 return hdev;
1873}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874
1875/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001876
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001877bool hci_discovery_active(struct hci_dev *hdev)
1878{
1879 struct discovery_state *discov = &hdev->discovery;
1880
Andre Guedes6fbe1952012-02-03 17:47:58 -03001881 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001882 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001883 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001884 return true;
1885
Andre Guedes6fbe1952012-02-03 17:47:58 -03001886 default:
1887 return false;
1888 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001889}
1890
Johan Hedbergff9ef572012-01-04 14:23:45 +02001891void hci_discovery_set_state(struct hci_dev *hdev, int state)
1892{
1893 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1894
1895 if (hdev->discovery.state == state)
1896 return;
1897
1898 switch (state) {
1899 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001900 hci_update_background_scan(hdev);
1901
Andre Guedes7b99b652012-02-13 15:41:02 -03001902 if (hdev->discovery.state != DISCOVERY_STARTING)
1903 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001904 break;
1905 case DISCOVERY_STARTING:
1906 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001907 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001908 mgmt_discovering(hdev, 1);
1909 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001910 case DISCOVERY_RESOLVING:
1911 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001912 case DISCOVERY_STOPPING:
1913 break;
1914 }
1915
1916 hdev->discovery.state = state;
1917}
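
/* Illustrative sketch (hypothetical helper, not in the original file):
 * a typical discovery run driven through hci_discovery_set_state(). As
 * the switch above shows, only the FINDING and STOPPED transitions emit
 * mgmt_discovering() events.
 */
static void example_discovery_cycle(struct hci_dev *hdev)
{
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);	/* signals start */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);	/* signals stop */
}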
1918
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001919void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920{
Johan Hedberg30883512012-01-04 14:16:21 +02001921 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001922 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
Johan Hedberg561aafb2012-01-04 13:31:59 +02001924 list_for_each_entry_safe(p, n, &cache->all, all) {
1925 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001926 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001928
1929 INIT_LIST_HEAD(&cache->unknown);
1930 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931}
1932
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001933struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1934 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935{
Johan Hedberg30883512012-01-04 14:16:21 +02001936 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 struct inquiry_entry *e;
1938
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001939 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Johan Hedberg561aafb2012-01-04 13:31:59 +02001941 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001943 return e;
1944 }
1945
1946 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947}
1948
Johan Hedberg561aafb2012-01-04 13:31:59 +02001949struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001950 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001951{
Johan Hedberg30883512012-01-04 14:16:21 +02001952 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001953 struct inquiry_entry *e;
1954
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001955 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001956
1957 list_for_each_entry(e, &cache->unknown, list) {
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1960 }
1961
1962 return NULL;
1963}
1964
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001965struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001966 bdaddr_t *bdaddr,
1967 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001968{
1969 struct discovery_state *cache = &hdev->discovery;
1970 struct inquiry_entry *e;
1971
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001972 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001973
1974 list_for_each_entry(e, &cache->resolve, list) {
1975 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1976 return e;
1977 if (!bacmp(&e->data.bdaddr, bdaddr))
1978 return e;
1979 }
1980
1981 return NULL;
1982}
1983
Johan Hedberga3d4e202012-01-09 00:53:02 +02001984void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001985 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001986{
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct list_head *pos = &cache->resolve;
1989 struct inquiry_entry *p;
1990
1991 list_del(&ie->list);
1992
1993 list_for_each_entry(p, &cache->resolve, list) {
1994 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001995 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001996 break;
1997 pos = &p->list;
1998 }
1999
2000 list_add(&ie->list, pos);
2001}
2002
Johan Hedberg31754052012-01-04 13:39:52 +02002003bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002004 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
Johan Hedberg30883512012-01-04 14:16:21 +02002006 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002007 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002009 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
Szymon Janc2b2fec42012-11-20 11:38:54 +01002011 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2012
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002013 if (ssp)
2014 *ssp = data->ssp_mode;
2015
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002016 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002017 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002018 if (ie->data.ssp_mode && ssp)
2019 *ssp = true;
2020
Johan Hedberga3d4e202012-01-09 00:53:02 +02002021 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002022 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002023 ie->data.rssi = data->rssi;
2024 hci_inquiry_cache_update_resolve(hdev, ie);
2025 }
2026
Johan Hedberg561aafb2012-01-04 13:31:59 +02002027 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002028 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002029
Johan Hedberg561aafb2012-01-04 13:31:59 +02002030 /* Entry not in the cache. Add new one. */
2031 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2032 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002033 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002034
2035 list_add(&ie->all, &cache->all);
2036
2037 if (name_known) {
2038 ie->name_state = NAME_KNOWN;
2039 } else {
2040 ie->name_state = NAME_NOT_KNOWN;
2041 list_add(&ie->list, &cache->unknown);
2042 }
2043
2044update:
2045 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002046 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002047 ie->name_state = NAME_KNOWN;
2048 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 }
2050
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002051 memcpy(&ie->data, data, sizeof(*data));
2052 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002054
2055 if (ie->name_state == NAME_NOT_KNOWN)
2056 return false;
2057
2058 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059}
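
/* Illustrative sketch (hypothetical helper, not in the original file):
 * feeding one inquiry result into the cache. Callers hold the hdev lock,
 * as hci_inquiry() does around inquiry_cache_dump() below; the return
 * value says whether the entry already has a known name.
 */
static bool example_cache_inquiry_result(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, s8 rssi)
{
	struct inquiry_data data;
	bool name_known, ssp;

	memset(&data, 0, sizeof(data));
	bacpy(&data.bdaddr, bdaddr);
	data.rssi = rssi;

	hci_dev_lock(hdev);
	name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
	hci_dev_unlock(hdev);

	return name_known;
}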
2060
2061static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2062{
Johan Hedberg30883512012-01-04 14:16:21 +02002063 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 struct inquiry_info *info = (struct inquiry_info *) buf;
2065 struct inquiry_entry *e;
2066 int copied = 0;
2067
Johan Hedberg561aafb2012-01-04 13:31:59 +02002068 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002070
2071 if (copied >= num)
2072 break;
2073
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 bacpy(&info->bdaddr, &data->bdaddr);
2075 info->pscan_rep_mode = data->pscan_rep_mode;
2076 info->pscan_period_mode = data->pscan_period_mode;
2077 info->pscan_mode = data->pscan_mode;
2078 memcpy(info->dev_class, data->dev_class, 3);
2079 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002080
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002082 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 }
2084
2085 BT_DBG("cache %p, copied %d", cache, copied);
2086 return copied;
2087}
2088
Johan Hedberg42c6b122013-03-05 20:37:49 +02002089static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
2091 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002092 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 struct hci_cp_inquiry cp;
2094
2095 BT_DBG("%s", hdev->name);
2096
2097 if (test_bit(HCI_INQUIRY, &hdev->flags))
2098 return;
2099
2100 /* Start Inquiry */
2101 memcpy(&cp.lap, &ir->lap, 3);
2102 cp.length = ir->length;
2103 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002104 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105}
2106
Andre Guedes3e13fa12013-03-27 20:04:56 -03002107static int wait_inquiry(void *word)
2108{
2109 schedule();
2110 return signal_pending(current);
2111}
2112
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113int hci_inquiry(void __user *arg)
2114{
2115 __u8 __user *ptr = arg;
2116 struct hci_inquiry_req ir;
2117 struct hci_dev *hdev;
2118 int err = 0, do_inquiry = 0, max_rsp;
2119 long timeo;
2120 __u8 *buf;
2121
2122 if (copy_from_user(&ir, ptr, sizeof(ir)))
2123 return -EFAULT;
2124
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002125 hdev = hci_dev_get(ir.dev_id);
2126 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 return -ENODEV;
2128
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002129 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2130 err = -EBUSY;
2131 goto done;
2132 }
2133
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002134 if (hdev->dev_type != HCI_BREDR) {
2135 err = -EOPNOTSUPP;
2136 goto done;
2137 }
2138
Johan Hedberg56f87902013-10-02 13:43:13 +03002139 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2140 err = -EOPNOTSUPP;
2141 goto done;
2142 }
2143
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002144 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002145 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002146 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002147 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 do_inquiry = 1;
2149 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002150 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
Marcel Holtmann04837f62006-07-03 10:02:33 +02002152 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002153
2154 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002155 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2156 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002157 if (err < 0)
2158 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002159
2160 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2161 * cleared). If it is interrupted by a signal, return -EINTR.
2162 */
2163 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2164 TASK_INTERRUPTIBLE))
2165 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002168	/* For an unlimited number of responses, use a buffer with
 2169	 * 255 entries
2170 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2172
 2173	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2174	 * and then copy it to user space.
2175 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002176 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002177 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 err = -ENOMEM;
2179 goto done;
2180 }
2181
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002182 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002184 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
2186 BT_DBG("num_rsp %d", ir.num_rsp);
2187
2188 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2189 ptr += sizeof(ir);
2190 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002191 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002193 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 err = -EFAULT;
2195
2196 kfree(buf);
2197
2198done:
2199 hci_dev_put(hdev);
2200 return err;
2201}
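
/* Illustrative userspace sketch (an assumption, not part of this file):
 * the handler above backs the HCIINQUIRY ioctl, typically driven from a
 * raw HCI socket as below. "dd" is assumed to be the socket fd; the
 * request struct is followed in the same buffer by room for the
 * inquiry_info results that get copied back.
 *
 *	uint8_t buf[sizeof(struct hci_inquiry_req) +
 *		    255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;		// hci0
 *	ir->lap[0]  = 0x33;		// GIAC 0x9e8b33, little endian
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		// inquiry length
 *	ir->num_rsp = 0;		// unlimited (capped at 255 above)
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0)
 *		perror("HCIINQUIRY");
 */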
2202
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002203static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 int ret = 0;
2206
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 BT_DBG("%s %p", hdev->name, hdev);
2208
2209 hci_req_lock(hdev);
2210
Johan Hovold94324962012-03-15 14:48:41 +01002211 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2212 ret = -ENODEV;
2213 goto done;
2214 }
2215
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002216 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2217 /* Check for rfkill but allow the HCI setup stage to
2218 * proceed (which in itself doesn't cause any RF activity).
2219 */
2220 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2221 ret = -ERFKILL;
2222 goto done;
2223 }
2224
 2225	/* Check for a valid public address or a configured static
 2226	 * random address, but let the HCI setup proceed to
2227 * be able to determine if there is a public address
2228 * or not.
2229 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002230 * In case of user channel usage, it is not important
2231 * if a public address or static random address is
2232 * available.
2233 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002234 * This check is only valid for BR/EDR controllers
2235 * since AMP controllers do not have an address.
2236 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002237 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2238 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002239 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2240 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2241 ret = -EADDRNOTAVAIL;
2242 goto done;
2243 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002244 }
2245
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 if (test_bit(HCI_UP, &hdev->flags)) {
2247 ret = -EALREADY;
2248 goto done;
2249 }
2250
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 if (hdev->open(hdev)) {
2252 ret = -EIO;
2253 goto done;
2254 }
2255
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002256 atomic_set(&hdev->cmd_cnt, 1);
2257 set_bit(HCI_INIT, &hdev->flags);
2258
2259 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2260 ret = hdev->setup(hdev);
2261
2262 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002263 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2264 set_bit(HCI_RAW, &hdev->flags);
2265
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002266 if (!test_bit(HCI_RAW, &hdev->flags) &&
2267 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002268 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 }
2270
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002271 clear_bit(HCI_INIT, &hdev->flags);
2272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 if (!ret) {
2274 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002275 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 set_bit(HCI_UP, &hdev->flags);
2277 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002278 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002279 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002280 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002281 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002282 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002283 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002284 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002285 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002287 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002288 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002289 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
2291 skb_queue_purge(&hdev->cmd_q);
2292 skb_queue_purge(&hdev->rx_q);
2293
2294 if (hdev->flush)
2295 hdev->flush(hdev);
2296
2297 if (hdev->sent_cmd) {
2298 kfree_skb(hdev->sent_cmd);
2299 hdev->sent_cmd = NULL;
2300 }
2301
2302 hdev->close(hdev);
2303 hdev->flags = 0;
2304 }
2305
2306done:
2307 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 return ret;
2309}
2310
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002311/* ---- HCI ioctl helpers ---- */
2312
2313int hci_dev_open(__u16 dev)
2314{
2315 struct hci_dev *hdev;
2316 int err;
2317
2318 hdev = hci_dev_get(dev);
2319 if (!hdev)
2320 return -ENODEV;
2321
Johan Hedberge1d08f42013-10-01 22:44:50 +03002322 /* We need to ensure that no other power on/off work is pending
2323 * before proceeding to call hci_dev_do_open. This is
2324 * particularly important if the setup procedure has not yet
2325 * completed.
2326 */
2327 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2328 cancel_delayed_work(&hdev->power_off);
2329
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002330 /* After this call it is guaranteed that the setup procedure
2331 * has finished. This means that error conditions like RFKILL
2332 * or no valid public or static random address apply.
2333 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002334 flush_workqueue(hdev->req_workqueue);
2335
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002336 err = hci_dev_do_open(hdev);
2337
2338 hci_dev_put(hdev);
2339
2340 return err;
2341}
2342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343static int hci_dev_do_close(struct hci_dev *hdev)
2344{
2345 BT_DBG("%s %p", hdev->name, hdev);
2346
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002347 cancel_delayed_work(&hdev->power_off);
2348
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 hci_req_cancel(hdev, ENODEV);
2350 hci_req_lock(hdev);
2351
2352 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002353 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 hci_req_unlock(hdev);
2355 return 0;
2356 }
2357
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002358 /* Flush RX and TX works */
2359 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002360 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002362 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002363 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002364 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002365 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002366 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002367 }
2368
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002369 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002370 cancel_delayed_work(&hdev->service_cache);
2371
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002372 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002373
2374 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2375 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002376
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002377 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002378 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002380 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002381 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
2383 hci_notify(hdev, HCI_DEV_DOWN);
2384
2385 if (hdev->flush)
2386 hdev->flush(hdev);
2387
2388 /* Reset device */
2389 skb_queue_purge(&hdev->cmd_q);
2390 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002391 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002392 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002393 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002395 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 clear_bit(HCI_INIT, &hdev->flags);
2397 }
2398
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002399 /* flush cmd work */
2400 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401
2402 /* Drop queues */
2403 skb_queue_purge(&hdev->rx_q);
2404 skb_queue_purge(&hdev->cmd_q);
2405 skb_queue_purge(&hdev->raw_q);
2406
2407 /* Drop last sent command */
2408 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002409 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 kfree_skb(hdev->sent_cmd);
2411 hdev->sent_cmd = NULL;
2412 }
2413
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002414 kfree_skb(hdev->recv_evt);
2415 hdev->recv_evt = NULL;
2416
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 /* After this point our queues are empty
2418 * and no tasks are scheduled. */
2419 hdev->close(hdev);
2420
Johan Hedberg35b973c2013-03-15 17:06:59 -05002421 /* Clear flags */
2422 hdev->flags = 0;
2423 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2424
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002425 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2426 if (hdev->dev_type == HCI_BREDR) {
2427 hci_dev_lock(hdev);
2428 mgmt_powered(hdev, 0);
2429 hci_dev_unlock(hdev);
2430 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002431 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002432
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002433 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002434 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002435
Johan Hedberge59fda82012-02-22 18:11:53 +02002436 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002437 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002438 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002439
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 hci_req_unlock(hdev);
2441
2442 hci_dev_put(hdev);
2443 return 0;
2444}
2445
2446int hci_dev_close(__u16 dev)
2447{
2448 struct hci_dev *hdev;
2449 int err;
2450
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002451 hdev = hci_dev_get(dev);
2452 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002454
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002455 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2456 err = -EBUSY;
2457 goto done;
2458 }
2459
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002460 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2461 cancel_delayed_work(&hdev->power_off);
2462
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002464
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002465done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 hci_dev_put(hdev);
2467 return err;
2468}
2469
2470int hci_dev_reset(__u16 dev)
2471{
2472 struct hci_dev *hdev;
2473 int ret = 0;
2474
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002475 hdev = hci_dev_get(dev);
2476 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 return -ENODEV;
2478
2479 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480
Marcel Holtmann808a0492013-08-26 20:57:58 -07002481 if (!test_bit(HCI_UP, &hdev->flags)) {
2482 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002484 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002486 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2487 ret = -EBUSY;
2488 goto done;
2489 }
2490
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 /* Drop queues */
2492 skb_queue_purge(&hdev->rx_q);
2493 skb_queue_purge(&hdev->cmd_q);
2494
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002495 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002496 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002498 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499
2500 if (hdev->flush)
2501 hdev->flush(hdev);
2502
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002503 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002504 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
2506 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002507 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508
2509done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 hci_req_unlock(hdev);
2511 hci_dev_put(hdev);
2512 return ret;
2513}
2514
2515int hci_dev_reset_stat(__u16 dev)
2516{
2517 struct hci_dev *hdev;
2518 int ret = 0;
2519
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002520 hdev = hci_dev_get(dev);
2521 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 return -ENODEV;
2523
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002524 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2525 ret = -EBUSY;
2526 goto done;
2527 }
2528
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2530
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002531done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 return ret;
2534}
2535
2536int hci_dev_cmd(unsigned int cmd, void __user *arg)
2537{
2538 struct hci_dev *hdev;
2539 struct hci_dev_req dr;
2540 int err = 0;
2541
2542 if (copy_from_user(&dr, arg, sizeof(dr)))
2543 return -EFAULT;
2544
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002545 hdev = hci_dev_get(dr.dev_id);
2546 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 return -ENODEV;
2548
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002549 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2550 err = -EBUSY;
2551 goto done;
2552 }
2553
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002554 if (hdev->dev_type != HCI_BREDR) {
2555 err = -EOPNOTSUPP;
2556 goto done;
2557 }
2558
Johan Hedberg56f87902013-10-02 13:43:13 +03002559 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2560 err = -EOPNOTSUPP;
2561 goto done;
2562 }
2563
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 switch (cmd) {
2565 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002566 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2567 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 break;
2569
2570 case HCISETENCRYPT:
2571 if (!lmp_encrypt_capable(hdev)) {
2572 err = -EOPNOTSUPP;
2573 break;
2574 }
2575
2576 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2577 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002578 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2579 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 if (err)
2581 break;
2582 }
2583
Johan Hedberg01178cd2013-03-05 20:37:41 +02002584 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 break;
2587
2588 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002589 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2590 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 break;
2592
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002593 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002594 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002596 break;
2597
2598 case HCISETLINKMODE:
2599 hdev->link_mode = ((__u16) dr.dev_opt) &
2600 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2601 break;
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 case HCISETPTYPE:
2604 hdev->pkt_type = (__u16) dr.dev_opt;
2605 break;
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002608 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2609 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 break;
2611
2612 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002613 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2614 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 break;
2616
2617 default:
2618 err = -EINVAL;
2619 break;
2620 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002621
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002622done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 hci_dev_put(hdev);
2624 return err;
2625}
2626
2627int hci_get_dev_list(void __user *arg)
2628{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002629 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 struct hci_dev_list_req *dl;
2631 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 int n = 0, size, err;
2633 __u16 dev_num;
2634
2635 if (get_user(dev_num, (__u16 __user *) arg))
2636 return -EFAULT;
2637
2638 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2639 return -EINVAL;
2640
2641 size = sizeof(*dl) + dev_num * sizeof(*dr);
2642
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002643 dl = kzalloc(size, GFP_KERNEL);
2644 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 return -ENOMEM;
2646
2647 dr = dl->dev_req;
2648
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002649 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002650 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002651 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002652 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002653
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2655 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002656
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 (dr + n)->dev_id = hdev->id;
2658 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002659
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 if (++n >= dev_num)
2661 break;
2662 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002663 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664
2665 dl->dev_num = n;
2666 size = sizeof(*dl) + n * sizeof(*dr);
2667
2668 err = copy_to_user(arg, dl, size);
2669 kfree(dl);
2670
2671 return err ? -EFAULT : 0;
2672}
2673
2674int hci_get_dev_info(void __user *arg)
2675{
2676 struct hci_dev *hdev;
2677 struct hci_dev_info di;
2678 int err = 0;
2679
2680 if (copy_from_user(&di, arg, sizeof(di)))
2681 return -EFAULT;
2682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002683 hdev = hci_dev_get(di.dev_id);
2684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 return -ENODEV;
2686
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002687 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002688 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002689
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002690 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2691 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002692
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 strcpy(di.name, hdev->name);
2694 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002695 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 di.flags = hdev->flags;
2697 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002698 if (lmp_bredr_capable(hdev)) {
2699 di.acl_mtu = hdev->acl_mtu;
2700 di.acl_pkts = hdev->acl_pkts;
2701 di.sco_mtu = hdev->sco_mtu;
2702 di.sco_pkts = hdev->sco_pkts;
2703 } else {
2704 di.acl_mtu = hdev->le_mtu;
2705 di.acl_pkts = hdev->le_pkts;
2706 di.sco_mtu = 0;
2707 di.sco_pkts = 0;
2708 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 di.link_policy = hdev->link_policy;
2710 di.link_mode = hdev->link_mode;
2711
2712 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2713 memcpy(&di.features, &hdev->features, sizeof(di.features));
2714
2715 if (copy_to_user(arg, &di, sizeof(di)))
2716 err = -EFAULT;
2717
2718 hci_dev_put(hdev);
2719
2720 return err;
2721}
2722
2723/* ---- Interface to HCI drivers ---- */
2724
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002725static int hci_rfkill_set_block(void *data, bool blocked)
2726{
2727 struct hci_dev *hdev = data;
2728
2729 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2730
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2732 return -EBUSY;
2733
Johan Hedberg5e130362013-09-13 08:58:17 +03002734 if (blocked) {
2735 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002736 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2737 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002738 } else {
2739 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002740 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002741
2742 return 0;
2743}
2744
2745static const struct rfkill_ops hci_rfkill_ops = {
2746 .set_block = hci_rfkill_set_block,
2747};
2748
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002749static void hci_power_on(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002752 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002753
2754 BT_DBG("%s", hdev->name);
2755
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002756 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002757 if (err < 0) {
2758 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002759 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002760 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002761
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002762 /* During the HCI setup phase, a few error conditions are
2763 * ignored and they need to be checked now. If they are still
2764 * valid, it is important to turn the device back off.
2765 */
2766 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2767 (hdev->dev_type == HCI_BREDR &&
2768 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2769 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002770 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2771 hci_dev_do_close(hdev);
2772 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002773 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2774 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002775 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002776
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002777 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002778 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002779}
2780
2781static void hci_power_off(struct work_struct *work)
2782{
Johan Hedberg32435532011-11-07 22:16:04 +02002783 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002784 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002785
2786 BT_DBG("%s", hdev->name);
2787
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002788 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002789}
2790
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002791static void hci_discov_off(struct work_struct *work)
2792{
2793 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002794
2795 hdev = container_of(work, struct hci_dev, discov_off.work);
2796
2797 BT_DBG("%s", hdev->name);
2798
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002799 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002800}
2801
Johan Hedberg35f74982014-02-18 17:14:32 +02002802void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002803{
Johan Hedberg48210022013-01-27 00:31:28 +02002804 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002805
Johan Hedberg48210022013-01-27 00:31:28 +02002806 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2807 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002808 kfree(uuid);
2809 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002810}
2811
Johan Hedberg35f74982014-02-18 17:14:32 +02002812void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002813{
2814 struct list_head *p, *n;
2815
2816 list_for_each_safe(p, n, &hdev->link_keys) {
2817 struct link_key *key;
2818
2819 key = list_entry(p, struct link_key, list);
2820
2821 list_del(p);
2822 kfree(key);
2823 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002824}
2825
Johan Hedberg35f74982014-02-18 17:14:32 +02002826void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002827{
2828 struct smp_ltk *k, *tmp;
2829
2830 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2831 list_del(&k->list);
2832 kfree(k);
2833 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002834}
2835
Johan Hedberg970c4e42014-02-18 10:19:33 +02002836void hci_smp_irks_clear(struct hci_dev *hdev)
2837{
2838 struct smp_irk *k, *tmp;
2839
2840 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2841 list_del(&k->list);
2842 kfree(k);
2843 }
2844}
2845
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002846struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2847{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002848 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002849
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002850 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002851 if (bacmp(bdaddr, &k->bdaddr) == 0)
2852 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002853
2854 return NULL;
2855}
2856
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302857static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002858 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002859{
2860 /* Legacy key */
2861 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302862 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002863
2864 /* Debug keys are insecure so don't store them persistently */
2865 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302866 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002867
2868 /* Changed combination key and there's no previous one */
2869 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302870 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002871
2872 /* Security mode 3 case */
2873 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302874 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002875
 2876	/* Neither local nor remote side had no-bonding as a requirement */
2877 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302878 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002879
2880 /* Local side had dedicated bonding as requirement */
2881 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302882 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002883
2884 /* Remote side had dedicated bonding as requirement */
2885 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302886 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002887
2888 /* If none of the above criteria match, then don't store the key
2889 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302890 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002891}
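
/* Illustrative sketch (hypothetical helper, not in the original file):
 * two concrete outcomes of the rules above, assuming the standard link
 * key type values. A debug combination key is never stored; a legacy
 * combination key (type < 0x03) always is.
 */
static void example_key_persistence_rules(struct hci_dev *hdev)
{
	/* Debug keys are rejected outright */
	WARN_ON(hci_persistent_key(hdev, NULL, HCI_LK_DEBUG_COMBINATION,
				   0xff));

	/* Legacy keys are stored unconditionally */
	WARN_ON(!hci_persistent_key(hdev, NULL, HCI_LK_COMBINATION, 0xff));
}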

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}
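
/* Example lookup (an illustrative sketch, not a caller in this file;
 * "bdaddr" is a hypothetical variable, and the key lists are normally
 * accessed with hdev->lock held):
 *
 *	struct smp_ltk *ltk;
 *
 *	hci_dev_lock(hdev);
 *	ltk = hci_find_ltk_by_addr(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, true);
 *	if (ltk)
 *		... use ltk->val and ltk->enc_size to start encryption ...
 *	hci_dev_unlock(hdev);
 */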

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
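
/* Example of how received SMP key material might land in these helpers
 * (an illustrative sketch; the real callers live in smp.c, and all the
 * value variables here are hypothetical):
 *
 *	hci_dev_lock(hdev);
 *	hci_add_irk(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, irk_val, &rpa);
 *	hci_add_ltk(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, HCI_SMP_LTK,
 *		    authenticated, tk, enc_size, ediv, rand);
 *	hci_dev_unlock(hdev);
 */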

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
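
/* Example (an illustrative sketch; "bdaddr" is hypothetical): blocking
 * and later unblocking a BR/EDR device, as a management handler would:
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 */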

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
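
/* Example (an illustrative sketch; the interval values are hypothetical
 * and given in the usual 1.25 ms units): ask the core to reconnect to a
 * known LE device whenever it becomes available:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 */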
3393
3394/* This function requires the caller holds hdev->lock */
3395void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3396{
3397 struct hci_conn_params *params;
3398
3399 params = hci_conn_params_lookup(hdev, addr, addr_type);
3400 if (!params)
3401 return;
3402
Andre Guedescef952c2014-02-26 20:21:49 -03003403 hci_pend_le_conn_del(hdev, addr, addr_type);
3404
Andre Guedes15819a72014-02-03 13:56:18 -03003405 list_del(&params->list);
3406 kfree(params);
3407
3408 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3409}
3410
3411/* This function requires the caller holds hdev->lock */
3412void hci_conn_params_clear(struct hci_dev *hdev)
3413{
3414 struct hci_conn_params *params, *tmp;
3415
3416 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3417 list_del(&params->list);
3418 kfree(params);
3419 }
3420
3421 BT_DBG("All LE connection parameters were removed");
3422}
3423
Andre Guedes77a77a32014-02-26 20:21:46 -03003424/* This function requires the caller holds hdev->lock */
3425struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3426 bdaddr_t *addr, u8 addr_type)
3427{
3428 struct bdaddr_list *entry;
3429
3430 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3431 if (bacmp(&entry->bdaddr, addr) == 0 &&
3432 entry->bdaddr_type == addr_type)
3433 return entry;
3434 }
3435
3436 return NULL;
3437}
3438
3439/* This function requires the caller holds hdev->lock */
3440void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3441{
3442 struct bdaddr_list *entry;
3443
3444 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3445 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003446 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003447
3448 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3449 if (!entry) {
3450 BT_ERR("Out of memory");
3451 return;
3452 }
3453
3454 bacpy(&entry->bdaddr, addr);
3455 entry->bdaddr_type = addr_type;
3456
3457 list_add(&entry->list, &hdev->pend_le_conns);
3458
3459 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003460
3461done:
3462 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003463}
3464
3465/* This function requires the caller holds hdev->lock */
3466void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3467{
3468 struct bdaddr_list *entry;
3469
3470 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3471 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003472 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003473
3474 list_del(&entry->list);
3475 kfree(entry);
3476
3477 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003478
3479done:
3480 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003481}
3482
3483/* This function requires the caller holds hdev->lock */
3484void hci_pend_le_conns_clear(struct hci_dev *hdev)
3485{
3486 struct bdaddr_list *entry, *tmp;
3487
3488 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3489 list_del(&entry->list);
3490 kfree(entry);
3491 }
3492
3493 BT_DBG("All LE pending connections cleared");
3494}
3495
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003496static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003497{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003498 if (status) {
3499 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003500
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003501 hci_dev_lock(hdev);
3502 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3503 hci_dev_unlock(hdev);
3504 return;
3505 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003506}
3507
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003508static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003509{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003510 /* General inquiry access code (GIAC) */
3511 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3512 struct hci_request req;
3513 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003514 int err;
3515
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003516 if (status) {
3517 BT_ERR("Failed to disable LE scanning: status %d", status);
3518 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003519 }
3520
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003521 switch (hdev->discovery.type) {
3522 case DISCOV_TYPE_LE:
3523 hci_dev_lock(hdev);
3524 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3525 hci_dev_unlock(hdev);
3526 break;
3527
3528 case DISCOV_TYPE_INTERLEAVED:
3529 hci_req_init(&req, hdev);
3530
3531 memset(&cp, 0, sizeof(cp));
3532 memcpy(&cp.lap, lap, sizeof(cp.lap));
3533 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3534 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3535
3536 hci_dev_lock(hdev);
3537
3538 hci_inquiry_cache_flush(hdev);
3539
3540 err = hci_req_run(&req, inquiry_complete);
3541 if (err) {
3542 BT_ERR("Inquiry request failed: err %d", err);
3543 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3544 }
3545
3546 hci_dev_unlock(hdev);
3547 break;
3548 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003549}
3550
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003551static void le_scan_disable_work(struct work_struct *work)
3552{
3553 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003554 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003555 struct hci_request req;
3556 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003557
3558 BT_DBG("%s", hdev->name);
3559
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003560 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003561
Andre Guedesb1efcc22014-02-26 20:21:40 -03003562 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003563
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003564 err = hci_req_run(&req, le_scan_disable_work_complete);
3565 if (err)
3566 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003567}
3568
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* If privacy is required but no resolvable private address is
	 * available, use an unresolvable private address. This is useful
	 * for active scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address, use the static address as the random address (but
	 * skip the HCI command if the current random address is already
	 * the static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
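
/* Example (an illustrative sketch): a request builder would typically
 * call this before enabling scanning or advertising and then use the
 * resulting own_addr_type in the follow-up command:
 *
 *	struct hci_request req;
 *	u8 own_addr_type;
 *
 *	hci_req_init(&req, hdev);
 *	err = hci_update_random_address(&req, false, &own_addr_type);
 *	if (!err)
 *		... queue HCI_OP_LE_SET_SCAN_PARAM using own_addr_type ...
 */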

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
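
/* Typical driver life cycle around the two helpers above (an
 * illustrative sketch; my_open, my_close and my_send are hypothetical
 * transport callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */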

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
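
/* Example (an illustrative sketch): a transport driver that already has
 * a complete packet hands it over from its completion path:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */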

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
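
/* Example (an illustrative sketch; "buf" and "len" are hypothetical): a
 * UART-style driver can feed an arbitrary run of bytes of one packet
 * type here and let hci_reassembly() rebuild complete frames:
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		... the stream is out of sync, reset the transport ...
 */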

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
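
/* Example (an illustrative sketch; the "cp" structures and
 * "complete_cb" are hypothetical): batching two commands into one
 * request so that the callback runs after the last command completes:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
 *		    &param_cp);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
 *		    &enable_cp);
 *	err = hci_req_run(&req, complete_cb);
 */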

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
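
/* Example (an illustrative sketch): sending a single stand-alone
 * command with no parameters, e.g. a controller reset:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */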

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point
	 * in queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4263
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004265void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266{
4267 struct hci_command_hdr *hdr;
4268
4269 if (!hdev->sent_cmd)
4270 return NULL;
4271
4272 hdr = (void *) hdev->sent_cmd->data;
4273
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004274 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 return NULL;
4276
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004277 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278
4279 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4280}
4281
4282/* Send ACL data */
4283static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4284{
4285 struct hci_acl_hdr *hdr;
4286 int len = skb->len;
4287
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004288 skb_push(skb, HCI_ACL_HDR_SIZE);
4289 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004290 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004291 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4292 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293}
4294
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004295static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004296 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004298 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299 struct hci_dev *hdev = conn->hdev;
4300 struct sk_buff *list;
4301
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004302 skb->len = skb_headlen(skb);
4303 skb->data_len = 0;
4304
4305 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004306
4307 switch (hdev->dev_type) {
4308 case HCI_BREDR:
4309 hci_add_acl_hdr(skb, conn->handle, flags);
4310 break;
4311 case HCI_AMP:
4312 hci_add_acl_hdr(skb, chan->handle, flags);
4313 break;
4314 default:
4315 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4316 return;
4317 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004318
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004319 list = skb_shinfo(skb)->frag_list;
4320 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321		/* Non-fragmented */
4322 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4323
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004324 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 } else {
4326 /* Fragmented */
4327 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4328
4329 skb_shinfo(skb)->frag_list = NULL;
4330
4331 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004332 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004334 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004335
4336 flags &= ~ACL_START;
4337 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338 do {
4339 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004340
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004341 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004342 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343
4344 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4345
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004346 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347 } while (list);
4348
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004349 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004351}
4352
4353void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4354{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004355 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004356
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004357 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004358
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004359 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004361 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363
4364/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004365void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366{
4367 struct hci_dev *hdev = conn->hdev;
4368 struct hci_sco_hdr hdr;
4369
4370 BT_DBG("%s len %d", hdev->name, skb->len);
4371
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004372 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373 hdr.dlen = skb->len;
4374
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004375 skb_push(skb, HCI_SCO_HDR_SIZE);
4376 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004377 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004379 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004380
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004382 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384
4385/* ---- HCI TX task (outgoing data) ---- */
4386
4387/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004388static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4389 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390{
4391 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004392 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004393 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004395	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004397
4398 rcu_read_lock();
4399
4400 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004401 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004403
4404 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4405 continue;
4406
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407 num++;
4408
4409 if (c->sent < min) {
4410 min = c->sent;
4411 conn = c;
4412 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004413
4414 if (hci_conn_num(hdev, type) == num)
4415 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416 }
4417
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004418 rcu_read_unlock();
4419
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004421 int cnt, q;
4422
4423 switch (conn->type) {
4424 case ACL_LINK:
4425 cnt = hdev->acl_cnt;
4426 break;
4427 case SCO_LINK:
4428 case ESCO_LINK:
4429 cnt = hdev->sco_cnt;
4430 break;
4431 case LE_LINK:
4432 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4433 break;
4434 default:
4435 cnt = 0;
4436 BT_ERR("Unknown link type");
4437 }
4438
4439 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 *quote = q ? q : 1;
4441 } else
4442 *quote = 0;
4443
4444 BT_DBG("conn %p quote %d", conn, *quote);
4445 return conn;
4446}
4447
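/* TX timeout: assume the controller has lost track of outstanding
 * packets and forcibly disconnect every connection of the given
 * type that still has unacknowledged data.
 */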
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004448static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449{
4450 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004451 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452
Ville Tervobae1f5d92011-02-10 22:38:53 -03004453 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004455 rcu_read_lock();
4456
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004458 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004459 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004460 BT_ERR("%s killing stalled connection %pMR",
4461 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004462 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463 }
4464 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004465
4466 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467}
4468
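/* Channel scheduler: pick the channel of the given link type whose
 * pending data has the highest priority, preferring the connection
 * with the fewest packets in flight, and derive a fair per-round
 * quote from the available controller buffer credits.
 */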
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004469static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4470 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004471{
4472 struct hci_conn_hash *h = &hdev->conn_hash;
4473 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004474 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004475 struct hci_conn *conn;
4476 int cnt, q, conn_num = 0;
4477
4478 BT_DBG("%s", hdev->name);
4479
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004480 rcu_read_lock();
4481
4482 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004483 struct hci_chan *tmp;
4484
4485 if (conn->type != type)
4486 continue;
4487
4488 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4489 continue;
4490
4491 conn_num++;
4492
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004493 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004494 struct sk_buff *skb;
4495
4496 if (skb_queue_empty(&tmp->data_q))
4497 continue;
4498
4499 skb = skb_peek(&tmp->data_q);
4500 if (skb->priority < cur_prio)
4501 continue;
4502
4503 if (skb->priority > cur_prio) {
4504 num = 0;
4505 min = ~0;
4506 cur_prio = skb->priority;
4507 }
4508
4509 num++;
4510
4511 if (conn->sent < min) {
4512 min = conn->sent;
4513 chan = tmp;
4514 }
4515 }
4516
4517 if (hci_conn_num(hdev, type) == conn_num)
4518 break;
4519 }
4520
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004521 rcu_read_unlock();
4522
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004523 if (!chan)
4524 return NULL;
4525
4526 switch (chan->conn->type) {
4527 case ACL_LINK:
4528 cnt = hdev->acl_cnt;
4529 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004530 case AMP_LINK:
4531 cnt = hdev->block_cnt;
4532 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004533 case SCO_LINK:
4534 case ESCO_LINK:
4535 cnt = hdev->sco_cnt;
4536 break;
4537 case LE_LINK:
4538 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4539 break;
4540 default:
4541 cnt = 0;
4542 BT_ERR("Unknown link type");
4543 }
4544
4545 q = cnt / num;
4546 *quote = q ? q : 1;
4547 BT_DBG("chan %p quote %d", chan, *quote);
4548 return chan;
4549}
4550
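/* After a scheduling round, promote the head skb of channels that
 * got nothing sent to HCI_PRIO_MAX - 1 so that low-priority traffic
 * cannot be starved indefinitely.
 */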
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004551static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4552{
4553 struct hci_conn_hash *h = &hdev->conn_hash;
4554 struct hci_conn *conn;
4555 int num = 0;
4556
4557 BT_DBG("%s", hdev->name);
4558
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004559 rcu_read_lock();
4560
4561 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004562 struct hci_chan *chan;
4563
4564 if (conn->type != type)
4565 continue;
4566
4567 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4568 continue;
4569
4570 num++;
4571
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004572 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004573 struct sk_buff *skb;
4574
4575 if (chan->sent) {
4576 chan->sent = 0;
4577 continue;
4578 }
4579
4580 if (skb_queue_empty(&chan->data_q))
4581 continue;
4582
4583 skb = skb_peek(&chan->data_q);
4584 if (skb->priority >= HCI_PRIO_MAX - 1)
4585 continue;
4586
4587 skb->priority = HCI_PRIO_MAX - 1;
4588
4589 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004590 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004591 }
4592
4593 if (hci_conn_num(hdev, type) == num)
4594 break;
4595 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004596
4597 rcu_read_unlock();
4598
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004599}
4600
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004601static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4602{
4603 /* Calculate count of blocks used by this packet */
4604 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4605}
4606
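/* Fire the link TX timeout handling when the controller has not
 * returned any ACL buffer credits since the last transmission for
 * longer than HCI_ACL_TX_TIMEOUT.
 */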
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004607static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 if (!test_bit(HCI_RAW, &hdev->flags)) {
4610		/* ACL tx timeout must be longer than the maximum
4611 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004612 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004613 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004614 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004616}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004618static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004619{
4620 unsigned int cnt = hdev->acl_cnt;
4621 struct hci_chan *chan;
4622 struct sk_buff *skb;
4623 int quote;
4624
4625 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004626
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004627 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004628 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004629 u32 priority = (skb_peek(&chan->data_q))->priority;
4630 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004631 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004632 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004633
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004634 /* Stop if priority has changed */
4635 if (skb->priority < priority)
4636 break;
4637
4638 skb = skb_dequeue(&chan->data_q);
4639
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004640 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004641 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004642
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004643 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 hdev->acl_last_tx = jiffies;
4645
4646 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004647 chan->sent++;
4648 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649 }
4650 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004651
4652 if (cnt != hdev->acl_cnt)
4653 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654}
4655
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004656static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004657{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004658 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004659 struct hci_chan *chan;
4660 struct sk_buff *skb;
4661 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004662 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004663
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004664 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004665
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004666 BT_DBG("%s", hdev->name);
4667
4668 if (hdev->dev_type == HCI_AMP)
4669 type = AMP_LINK;
4670 else
4671 type = ACL_LINK;
4672
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004673 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004674 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004675 u32 priority = (skb_peek(&chan->data_q))->priority;
4676 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4677 int blocks;
4678
4679 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004680 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004681
4682 /* Stop if priority has changed */
4683 if (skb->priority < priority)
4684 break;
4685
4686 skb = skb_dequeue(&chan->data_q);
4687
4688 blocks = __get_blocks(hdev, skb);
4689 if (blocks > hdev->block_cnt)
4690 return;
4691
4692 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004693 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004694
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004695 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004696 hdev->acl_last_tx = jiffies;
4697
4698 hdev->block_cnt -= blocks;
4699 quote -= blocks;
4700
4701 chan->sent += blocks;
4702 chan->conn->sent += blocks;
4703 }
4704 }
4705
4706 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004707 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004708}
4709
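/* Dispatch ACL scheduling according to the controller's flow
 * control mode: packet-based credits or block-based accounting.
 */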
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004710static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004711{
4712 BT_DBG("%s", hdev->name);
4713
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004714 /* No ACL link over BR/EDR controller */
4715 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4716 return;
4717
4718 /* No AMP link over AMP controller */
4719 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004720 return;
4721
4722 switch (hdev->flow_ctl_mode) {
4723 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4724 hci_sched_acl_pkt(hdev);
4725 break;
4726
4727 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4728 hci_sched_acl_blk(hdev);
4729 break;
4730 }
4731}
4732
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004734static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735{
4736 struct hci_conn *conn;
4737 struct sk_buff *skb;
4738 int quote;
4739
4740 BT_DBG("%s", hdev->name);
4741
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004742 if (!hci_conn_num(hdev, SCO_LINK))
4743 return;
4744
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4746 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4747 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004748 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749
4750 conn->sent++;
4751 if (conn->sent == ~0)
4752 conn->sent = 0;
4753 }
4754 }
4755}
4756
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004757static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004758{
4759 struct hci_conn *conn;
4760 struct sk_buff *skb;
4761 int quote;
4762
4763 BT_DBG("%s", hdev->name);
4764
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004765 if (!hci_conn_num(hdev, ESCO_LINK))
4766 return;
4767
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004768 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4769 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004770 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4771 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004772 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004773
4774 conn->sent++;
4775 if (conn->sent == ~0)
4776 conn->sent = 0;
4777 }
4778 }
4779}
4780
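/* LE scheduler: use the dedicated LE buffer credits when the
 * controller reports a separate LE buffer pool, otherwise LE
 * traffic shares the ACL credits.
 */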
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004781static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004782{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004783 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004784 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004785 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004786
4787 BT_DBG("%s", hdev->name);
4788
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004789 if (!hci_conn_num(hdev, LE_LINK))
4790 return;
4791
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004792 if (!test_bit(HCI_RAW, &hdev->flags)) {
4793		/* LE tx timeout must be longer than the maximum
4794 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004795 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004796 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004797 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004798 }
4799
4800 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004801 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004802 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004803 u32 priority = (skb_peek(&chan->data_q))->priority;
4804 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004805 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004806 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004807
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004808 /* Stop if priority has changed */
4809 if (skb->priority < priority)
4810 break;
4811
4812 skb = skb_dequeue(&chan->data_q);
4813
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004814 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004815 hdev->le_last_tx = jiffies;
4816
4817 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004818 chan->sent++;
4819 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004820 }
4821 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004822
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004823 if (hdev->le_pkts)
4824 hdev->le_cnt = cnt;
4825 else
4826 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004827
4828 if (cnt != tmp)
4829 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004830}
4831
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004832static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004834 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004835 struct sk_buff *skb;
4836
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004837 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004838 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004839
Marcel Holtmann52de5992013-09-03 18:08:38 -07004840 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4841 /* Schedule queues and send stuff to HCI driver */
4842 hci_sched_acl(hdev);
4843 hci_sched_sco(hdev);
4844 hci_sched_esco(hdev);
4845 hci_sched_le(hdev);
4846 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004847
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848 /* Send next queued raw (unknown type) packet */
4849 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004850 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851}
4852
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004853/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854
4855/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004856static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857{
4858 struct hci_acl_hdr *hdr = (void *) skb->data;
4859 struct hci_conn *conn;
4860 __u16 handle, flags;
4861
4862 skb_pull(skb, HCI_ACL_HDR_SIZE);
4863
4864 handle = __le16_to_cpu(hdr->handle);
4865 flags = hci_flags(handle);
4866 handle = hci_handle(handle);
4867
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004868 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004869 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870
4871 hdev->stat.acl_rx++;
4872
4873 hci_dev_lock(hdev);
4874 conn = hci_conn_hash_lookup_handle(hdev, handle);
4875 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004876
Linus Torvalds1da177e2005-04-16 15:20:36 -07004877 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004878 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004879
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004881 l2cap_recv_acldata(conn, skb, flags);
4882 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004884 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004885 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886 }
4887
4888 kfree_skb(skb);
4889}
4890
4891/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004892static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893{
4894 struct hci_sco_hdr *hdr = (void *) skb->data;
4895 struct hci_conn *conn;
4896 __u16 handle;
4897
4898 skb_pull(skb, HCI_SCO_HDR_SIZE);
4899
4900 handle = __le16_to_cpu(hdr->handle);
4901
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004902 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903
4904 hdev->stat.sco_rx++;
4905
4906 hci_dev_lock(hdev);
4907 conn = hci_conn_hash_lookup_handle(hdev, handle);
4908 hci_dev_unlock(hdev);
4909
4910 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004912 sco_recv_scodata(conn, skb);
4913 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004915 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004916 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004917 }
4918
4919 kfree_skb(skb);
4920}
4921
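/* The current request is complete when the command queue is empty
 * or the next queued command carries the req.start marker, i.e. it
 * begins a new request.
 */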
Johan Hedberg9238f362013-03-05 20:37:48 +02004922static bool hci_req_is_complete(struct hci_dev *hdev)
4923{
4924 struct sk_buff *skb;
4925
4926 skb = skb_peek(&hdev->cmd_q);
4927 if (!skb)
4928 return true;
4929
4930 return bt_cb(skb)->req.start;
4931}
4932
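/* Requeue a clone of the last sent command (unless it was
 * HCI_OP_RESET) so it gets transmitted again; used below to recover
 * when a controller loses a pending command.
 */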
Johan Hedberg42c6b122013-03-05 20:37:49 +02004933static void hci_resend_last(struct hci_dev *hdev)
4934{
4935 struct hci_command_hdr *sent;
4936 struct sk_buff *skb;
4937 u16 opcode;
4938
4939 if (!hdev->sent_cmd)
4940 return;
4941
4942 sent = (void *) hdev->sent_cmd->data;
4943 opcode = __le16_to_cpu(sent->opcode);
4944 if (opcode == HCI_OP_RESET)
4945 return;
4946
4947 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4948 if (!skb)
4949 return;
4950
4951 skb_queue_head(&hdev->cmd_q, skb);
4952 queue_work(hdev->workqueue, &hdev->cmd_work);
4953}
4954
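/* Called for command status/complete events: decide whether the
 * event finishes the current request and, if so, run the request's
 * complete callback exactly once, flushing any commands of the same
 * request still sitting in the command queue.
 */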
Johan Hedberg9238f362013-03-05 20:37:48 +02004955void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4956{
4957 hci_req_complete_t req_complete = NULL;
4958 struct sk_buff *skb;
4959 unsigned long flags;
4960
4961 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4962
Johan Hedberg42c6b122013-03-05 20:37:49 +02004963 /* If the completed command doesn't match the last one that was
4964 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004965 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004966 if (!hci_sent_cmd_data(hdev, opcode)) {
4967 /* Some CSR based controllers generate a spontaneous
4968 * reset complete event during init and any pending
4969 * command will never be completed. In such a case we
4970 * need to resend whatever was the last sent
4971 * command.
4972 */
4973 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4974 hci_resend_last(hdev);
4975
Johan Hedberg9238f362013-03-05 20:37:48 +02004976 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004977 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004978
4979	/* If the command succeeded and there are still more commands in
4980 * this request the request is not yet complete.
4981 */
4982 if (!status && !hci_req_is_complete(hdev))
4983 return;
4984
4985 /* If this was the last command in a request the complete
4986 * callback would be found in hdev->sent_cmd instead of the
4987 * command queue (hdev->cmd_q).
4988 */
4989 if (hdev->sent_cmd) {
4990 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004991
4992 if (req_complete) {
4993 /* We must set the complete callback to NULL to
4994 * avoid calling the callback more than once if
4995 * this function gets called again.
4996 */
4997 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4998
Johan Hedberg9238f362013-03-05 20:37:48 +02004999 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005000 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005001 }
5002
5003 /* Remove all pending commands belonging to this request */
5004 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5005 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5006 if (bt_cb(skb)->req.start) {
5007 __skb_queue_head(&hdev->cmd_q, skb);
5008 break;
5009 }
5010
5011 req_complete = bt_cb(skb)->req.complete;
5012 kfree_skb(skb);
5013 }
5014 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5015
5016call_complete:
5017 if (req_complete)
5018 req_complete(hdev, status);
5019}
5020
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005021static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005023 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024 struct sk_buff *skb;
5025
5026 BT_DBG("%s", hdev->name);
5027
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005029 /* Send copy to monitor */
5030 hci_send_to_monitor(hdev, skb);
5031
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 if (atomic_read(&hdev->promisc)) {
5033 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005034 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 }
5036
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005037 if (test_bit(HCI_RAW, &hdev->flags) ||
5038 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 kfree_skb(skb);
5040 continue;
5041 }
5042
5043 if (test_bit(HCI_INIT, &hdev->flags)) {
5044			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005045 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 case HCI_ACLDATA_PKT:
5047 case HCI_SCODATA_PKT:
5048 kfree_skb(skb);
5049 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005050 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051 }
5052
5053 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005054 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005056 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 hci_event_packet(hdev, skb);
5058 break;
5059
5060 case HCI_ACLDATA_PKT:
5061 BT_DBG("%s ACL data packet", hdev->name);
5062 hci_acldata_packet(hdev, skb);
5063 break;
5064
5065 case HCI_SCODATA_PKT:
5066 BT_DBG("%s SCO data packet", hdev->name);
5067 hci_scodata_packet(hdev, skb);
5068 break;
5069
5070 default:
5071 kfree_skb(skb);
5072 break;
5073 }
5074 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075}
5076
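/* Command worker: send the next queued HCI command once the
 * controller has a free command slot (cmd_cnt), keep a clone in
 * hdev->sent_cmd for hci_sent_cmd_data(), and arm cmd_timer to
 * catch a controller that never responds.
 */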
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005077static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005079 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 struct sk_buff *skb;
5081
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005082 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5083 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005086 if (atomic_read(&hdev->cmd_cnt)) {
5087 skb = skb_dequeue(&hdev->cmd_q);
5088 if (!skb)
5089 return;
5090
Wei Yongjun7585b972009-02-25 18:29:52 +08005091 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005093 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005094 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005096 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005097 if (test_bit(HCI_RESET, &hdev->flags))
5098 del_timer(&hdev->cmd_timer);
5099 else
5100 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005101 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102 } else {
5103 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005104 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105 }
5106 }
5107}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005108
5109void hci_req_add_le_scan_disable(struct hci_request *req)
5110{
5111 struct hci_cp_le_set_scan_enable cp;
5112
5113 memset(&cp, 0, sizeof(cp));
5114 cp.enable = LE_SCAN_DISABLE;
5115 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5116}
Andre Guedesa4790db2014-02-26 20:21:47 -03005117
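/* Append the commands needed to start LE passive scanning to an
 * existing request; does nothing if a suitable own address type
 * cannot be determined.
 */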
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005118void hci_req_add_le_passive_scan(struct hci_request *req)
5119{
5120 struct hci_cp_le_set_scan_param param_cp;
5121 struct hci_cp_le_set_scan_enable enable_cp;
5122 struct hci_dev *hdev = req->hdev;
5123 u8 own_addr_type;
5124
5125 /* Set require_privacy to true to avoid identification from
5126 * unknown peer devices. Since this is passive scanning, no
5127 * SCAN_REQ using the local identity should be sent. Mandating
5128 * privacy is just an extra precaution.
5129 */
5130 if (hci_update_random_address(req, true, &own_addr_type))
5131 return;
5132
5133 memset(&param_cp, 0, sizeof(param_cp));
5134 param_cp.type = LE_SCAN_PASSIVE;
5135 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5136 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5137 param_cp.own_address_type = own_addr_type;
5138 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5139 &param_cp);
5140
5141 memset(&enable_cp, 0, sizeof(enable_cp));
5142 enable_cp.enable = LE_SCAN_ENABLE;
5143 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5144 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5145 &enable_cp);
5146}
5147
Andre Guedesa4790db2014-02-26 20:21:47 -03005148static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5149{
5150 if (status)
5151 BT_DBG("HCI request failed to update background scanning: "
5152 "status 0x%2.2x", status);
5153}
5154
5155/* This function controls the background scanning based on hdev->pend_le_conns
5156 * list. If there are pending LE connections we start the background scanning,
5157 * otherwise we stop it.
5158 *
5159 * This function requires the caller holds hdev->lock.
5160 */
5161void hci_update_background_scan(struct hci_dev *hdev)
5162{
Andre Guedesa4790db2014-02-26 20:21:47 -03005163 struct hci_request req;
5164 struct hci_conn *conn;
5165 int err;
5166
5167 hci_req_init(&req, hdev);
5168
5169 if (list_empty(&hdev->pend_le_conns)) {
5170		/* If there are no pending LE connections, we should stop
5171 * the background scanning.
5172 */
5173
5174 /* If controller is not scanning we are done. */
5175 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5176 return;
5177
5178 hci_req_add_le_scan_disable(&req);
5179
5180 BT_DBG("%s stopping background scanning", hdev->name);
5181 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005182 /* If there is at least one pending LE connection, we should
5183 * keep the background scan running.
5184 */
5185
5186 /* If controller is already scanning we are done. */
5187 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5188 return;
5189
5190 /* If controller is connecting, we should not start scanning
5191 * since some controllers are not able to scan and connect at
5192 * the same time.
5193 */
5194 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5195 if (conn)
5196 return;
5197
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005198 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005199
5200 BT_DBG("%s starting background scanning", hdev->name);
5201 }
5202
5203 err = hci_req_run(&req, update_background_scan_complete);
5204 if (err)
5205 BT_ERR("Failed to run HCI request: err %d", err);
5206}