/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

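/* Forward a device event (register/unregister, up/down) to the HCI
 * socket layer so that monitoring sockets are notified.
 */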
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
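
/* A sketch of how such an attribute is typically exposed under the
 * per-device debugfs directory (registration site assumed for
 * illustration):
 *
 *	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
 *			    &dut_mode_fops);
 *
 * file->private_data then carries the hci_dev pointer that the read and
 * write handlers above rely on.
 */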

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
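
/* The read-only entries below all follow the same seq_file pattern:
 * single_open() binds the show callback to the hci_dev passed in via
 * inode->i_private, and seq_read/seq_lseek/single_release provide the
 * rest of the file semantics.
 */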

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

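	/* The idle timeout is expressed in milliseconds; zero disables it,
	 * otherwise values from 500 ms up to one hour (3600000 ms) are
	 * accepted.
	 */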
	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

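	/* Sniff intervals are expressed in slots (0.625 ms); only non-zero,
	 * even values are valid, and the minimum must not exceed the
	 * configured maximum.
	 */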
	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open = identity_resolving_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

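	/* LE connection intervals are in units of 1.25 ms; the valid range
	 * is 0x0006 (7.5 ms) to 0x0c80 (4 s), and the minimum must not
	 * exceed the configured maximum.
	 */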
	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

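	/* The advertising channel map is a 3-bit field: 0x01 = channel 37,
	 * 0x02 = channel 38, 0x04 = channel 39. At least one channel must
	 * be enabled.
	 */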
	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

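/* Accepted input, derived from the parsing below (bdaddr written with the
 * most significant byte first):
 *
 *	add <bdaddr> <addr_type> [<auto_connect>]
 *	del <bdaddr> <addr_type>
 *	clr
 *
 * e.g. "add 00:11:22:33:44:55 0 1"
 */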
static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = kzalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open = le_auto_conn_open,
	.read = seq_read,
	.write = le_auto_conn_write,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
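
/* A minimal, illustrative caller pattern for the synchronous command
 * helpers above, mirroring their use in dut_mode_write():
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 *
 * The returned skb holds the command complete parameters; the first byte
 * is the HCI status code.
 */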

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

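/* Pick a value for the Write Inquiry Mode command: 0x02 selects extended
 * inquiry results, 0x01 inquiry results with RSSI, and 0x00 the standard
 * mode. The manufacturer/revision checks below appear to cover controllers
 * that support RSSI inquiry results without advertising the LMP feature.
 */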
1366static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1367{
1368 if (lmp_ext_inq_capable(hdev))
1369 return 0x02;
1370
1371 if (lmp_inq_rssi_capable(hdev))
1372 return 0x01;
1373
1374 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1375 hdev->lmp_subver == 0x0757)
1376 return 0x01;
1377
1378 if (hdev->manufacturer == 15) {
1379 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1380 return 0x01;
1381 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1382 return 0x01;
1383 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1384 return 0x01;
1385 }
1386
1387 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1388 hdev->lmp_subver == 0x1805)
1389 return 0x01;
1390
1391 return 0x00;
1392}
1393
Johan Hedberg42c6b122013-03-05 20:37:49 +02001394static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001395{
1396 u8 mode;
1397
Johan Hedberg42c6b122013-03-05 20:37:49 +02001398 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001399
Johan Hedberg42c6b122013-03-05 20:37:49 +02001400 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001401}
1402
Johan Hedberg42c6b122013-03-05 20:37:49 +02001403static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001404{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001405 struct hci_dev *hdev = req->hdev;
1406
Johan Hedberg2177bab2013-03-05 20:37:43 +02001407 /* The second byte is 0xff instead of 0x9f (two reserved bits
1408 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1409 * command otherwise.
1410 */
1411 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1412
1413 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1414 * any event mask for pre 1.2 devices.
1415 */
1416 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1417 return;
1418
1419 if (lmp_bredr_capable(hdev)) {
1420 events[4] |= 0x01; /* Flow Specification Complete */
1421 events[4] |= 0x02; /* Inquiry Result with RSSI */
1422 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1423 events[5] |= 0x08; /* Synchronous Connection Complete */
1424 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001425 } else {
1426 /* Use a different default for LE-only devices */
1427 memset(events, 0, sizeof(events));
1428 events[0] |= 0x10; /* Disconnection Complete */
1429 events[0] |= 0x80; /* Encryption Change */
1430 events[1] |= 0x08; /* Read Remote Version Information Complete */
1431 events[1] |= 0x20; /* Command Complete */
1432 events[1] |= 0x40; /* Command Status */
1433 events[1] |= 0x80; /* Hardware Error */
1434 events[2] |= 0x04; /* Number of Completed Packets */
1435 events[3] |= 0x02; /* Data Buffer Overflow */
1436 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001437 }
1438
1439 if (lmp_inq_rssi_capable(hdev))
1440 events[4] |= 0x02; /* Inquiry Result with RSSI */
1441
1442 if (lmp_sniffsubr_capable(hdev))
1443 events[5] |= 0x20; /* Sniff Subrating */
1444
1445 if (lmp_pause_enc_capable(hdev))
1446 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1447
1448 if (lmp_ext_inq_capable(hdev))
1449 events[5] |= 0x40; /* Extended Inquiry Result */
1450
1451 if (lmp_no_flush_capable(hdev))
1452 events[7] |= 0x01; /* Enhanced Flush Complete */
1453
1454 if (lmp_lsto_capable(hdev))
1455 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1456
1457 if (lmp_ssp_capable(hdev)) {
1458 events[6] |= 0x01; /* IO Capability Request */
1459 events[6] |= 0x02; /* IO Capability Response */
1460 events[6] |= 0x04; /* User Confirmation Request */
1461 events[6] |= 0x08; /* User Passkey Request */
1462 events[6] |= 0x10; /* Remote OOB Data Request */
1463 events[6] |= 0x20; /* Simple Pairing Complete */
1464 events[7] |= 0x04; /* User Passkey Notification */
1465 events[7] |= 0x08; /* Keypress Notification */
1466 events[7] |= 0x10; /* Remote Host Supported
1467 * Features Notification
1468 */
1469 }
1470
1471 if (lmp_le_capable(hdev))
1472 events[7] |= 0x20; /* LE Meta-Event */
1473
Johan Hedberg42c6b122013-03-05 20:37:49 +02001474 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001475
1476 if (lmp_le_capable(hdev)) {
1477 memset(events, 0, sizeof(events));
1478 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001479 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1480 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001481 }
1482}
1483
Johan Hedberg42c6b122013-03-05 20:37:49 +02001484static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001485{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486 struct hci_dev *hdev = req->hdev;
1487
Johan Hedberg2177bab2013-03-05 20:37:43 +02001488 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001489 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001490 else
1491 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001492
1493 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001494 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001495
Johan Hedberg42c6b122013-03-05 20:37:49 +02001496 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001497
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001498 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1499 * local supported commands HCI command.
1500 */
1501 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001502 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001503
1504 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001505 /* When SSP is available, then the host features page
1506 * should also be available as well. However some
1507 * controllers list the max_page as 0 as long as SSP
1508 * has not been enabled. To achieve proper debugging
1509 * output, force the minimum max_page to 1 at least.
1510 */
1511 hdev->max_page = 0x01;
1512
Johan Hedberg2177bab2013-03-05 20:37:43 +02001513 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1514 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001515 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1516 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517 } else {
1518 struct hci_cp_write_eir cp;
1519
1520 memset(hdev->eir, 0, sizeof(hdev->eir));
1521 memset(&cp, 0, sizeof(cp));
1522
Johan Hedberg42c6b122013-03-05 20:37:49 +02001523 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001524 }
1525 }
1526
1527 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001529
1530 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001531 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001532
1533 if (lmp_ext_feat_capable(hdev)) {
1534 struct hci_cp_read_local_ext_features cp;
1535
1536 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001537 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1538 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001539 }
1540
1541 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1542 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001543 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1544 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001545 }
1546}
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001550 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001551 struct hci_cp_write_def_link_policy cp;
1552 u16 link_policy = 0;
1553
1554 if (lmp_rswitch_capable(hdev))
1555 link_policy |= HCI_LP_RSWITCH;
1556 if (lmp_hold_capable(hdev))
1557 link_policy |= HCI_LP_HOLD;
1558 if (lmp_sniff_capable(hdev))
1559 link_policy |= HCI_LP_SNIFF;
1560 if (lmp_park_capable(hdev))
1561 link_policy |= HCI_LP_PARK;
1562
1563 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001564 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001565}
1566
Johan Hedberg42c6b122013-03-05 20:37:49 +02001567static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001568{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001569 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001570 struct hci_cp_write_le_host_supported cp;
1571
Johan Hedbergc73eee92013-04-19 18:35:21 +03001572 /* LE-only devices do not support explicit enablement */
1573 if (!lmp_bredr_capable(hdev))
1574 return;
1575
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576 memset(&cp, 0, sizeof(cp));
1577
1578 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1579 cp.le = 0x01;
1580 cp.simul = lmp_le_br_capable(hdev);
1581 }
1582
1583 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001584 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1585 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001586}
1587
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001588static void hci_set_event_mask_page_2(struct hci_request *req)
1589{
1590 struct hci_dev *hdev = req->hdev;
1591 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1592
1593 /* If Connectionless Slave Broadcast master role is supported
1594 * enable all necessary events for it.
1595 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001596 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001597 events[1] |= 0x40; /* Triggered Clock Capture */
1598 events[1] |= 0x80; /* Synchronization Train Complete */
1599 events[2] |= 0x10; /* Slave Page Response Timeout */
1600 events[2] |= 0x20; /* CSB Channel Map Change */
1601 }
1602
1603 /* If Connectionless Slave Broadcast slave role is supported
1604 * enable all necessary events for it.
1605 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001606 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001607 events[2] |= 0x01; /* Synchronization Train Received */
1608 events[2] |= 0x02; /* CSB Receive */
1609 events[2] |= 0x04; /* CSB Timeout */
1610 events[2] |= 0x08; /* Truncated Page Complete */
1611 }
1612
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001613 /* Enable Authenticated Payload Timeout Expired event if supported */
1614 if (lmp_ping_capable(hdev))
1615 events[2] |= 0x80;
1616
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001617 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1618}
1619
Johan Hedberg42c6b122013-03-05 20:37:49 +02001620static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001621{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001622 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001623 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001624
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001625 /* Some Broadcom based Bluetooth controllers do not support the
1626 * Delete Stored Link Key command. They are clearly indicating its
1627 * absence in the bit mask of supported commands.
1628 *
1629 * Check the supported commands and send the command only if it is
1630 * marked as supported. If not supported, assume that the controller
1631 * does not have actual support for stored link keys, which makes this
1632 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001633 *
1634 * Some controllers indicate support for deleting stored link keys,
1635 * but do not actually implement it. The quirk lets a driver simply
1636 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001637 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001638 if (hdev->commands[6] & 0x80 &&
1639 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001640 struct hci_cp_delete_stored_link_key cp;
1641
1642 bacpy(&cp.bdaddr, BDADDR_ANY);
1643 cp.delete_all = 0x01;
1644 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1645 sizeof(cp), &cp);
1646 }
1647
Johan Hedberg2177bab2013-03-05 20:37:43 +02001648 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001650
Johan Hedberg7bf32042014-02-23 19:42:29 +02001651 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001652 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001653
1654 /* Read features beyond page 1 if available */
1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656 struct hci_cp_read_local_ext_features cp;
1657
1658 cp.page = p;
1659 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1660 sizeof(cp), &cp);
1661 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001662}
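/* hdev->commands[] holds the bit mask returned by the Read Local
 * Supported Commands command: octet 6 bit 7 (0x80) marks Delete Stored
 * Link Key and octet 5 bit 4 (0x10) marks Write Default Link Policy
 * Settings, which is what the two checks above test. A generic form of
 * the test, sketched purely for illustration:
 *
 *   static inline bool hci_cmd_supported(struct hci_dev *hdev,
 *                                        unsigned int octet, u8 mask)
 *   {
 *           return hdev->commands[octet] & mask;
 *   }
 */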
1663
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001664static void hci_init4_req(struct hci_request *req, unsigned long opt)
1665{
1666 struct hci_dev *hdev = req->hdev;
1667
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001668 /* Set event mask page 2 if the HCI command for it is supported */
1669 if (hdev->commands[22] & 0x04)
1670 hci_set_event_mask_page_2(req);
1671
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001672 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001673 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001674 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001675
1676 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001677 if ((lmp_sc_capable(hdev) ||
1678 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1680 u8 support = 0x01;
1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682 sizeof(support), &support);
1683 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001684}
1685
Johan Hedberg2177bab2013-03-05 20:37:43 +02001686static int __hci_init(struct hci_dev *hdev)
1687{
1688 int err;
1689
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691 if (err < 0)
1692 return err;
1693
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1696 */
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699 &dut_mode_fops);
1700 }
1701
Johan Hedberg2177bab2013-03-05 20:37:43 +02001702 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
1703 * BR/EDR/LE controllers. AMP controllers only need the
1704 * first stage init.
1705 */
1706 if (hdev->dev_type != HCI_BREDR)
1707 return 0;
1708
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710 if (err < 0)
1711 return err;
1712
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714 if (err < 0)
1715 return err;
1716
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718 if (err < 0)
1719 return err;
1720
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1723 */
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725 return 0;
1726
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001735 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1736
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001737 if (lmp_bredr_capable(hdev)) {
1738 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1739 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001740 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1741 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001742 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1743 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001744 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1745 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001746 }
1747
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001748 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001749 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1750 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001751 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1752 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001753 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1754 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001755 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1756 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001757 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001758
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001759 if (lmp_sniff_capable(hdev)) {
1760 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1761 hdev, &idle_timeout_fops);
1762 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1763 hdev, &sniff_min_interval_fops);
1764 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1765 hdev, &sniff_max_interval_fops);
1766 }
1767
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001768 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001769 debugfs_create_file("identity", 0400, hdev->debugfs,
1770 hdev, &identity_fops);
1771 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1772 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001773 debugfs_create_file("random_address", 0444, hdev->debugfs,
1774 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001775 debugfs_create_file("static_address", 0444, hdev->debugfs,
1776 hdev, &static_address_fops);
1777
1778 /* For controllers with a public address, provide a debug
1779 * option to force the usage of the configured static
1780 * address. By default the public address is used.
1781 */
1782 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1783 debugfs_create_file("force_static_address", 0644,
1784 hdev->debugfs, hdev,
1785 &force_static_address_fops);
1786
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001787 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1788 &hdev->le_white_list_size);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001789 debugfs_create_file("identity_resolving_keys", 0400,
1790 hdev->debugfs, hdev,
1791 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001792 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1793 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001794 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1795 hdev, &conn_min_interval_fops);
1796 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1797 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001798 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1799 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001800 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1801 &lowpan_debugfs_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001802 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1803 &le_auto_conn_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001804 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001805
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001806 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001807}
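/* All of the debugfs entries above follow the same registration pattern:
 * debugfs_create_file(name, mode, parent_dentry, private_data, fops).
 * A hypothetical read-only entry using the common seq_file idiom would
 * look roughly like this (names are illustrative, not from this file):
 *
 *   static int example_show(struct seq_file *f, void *p)
 *   {
 *           struct hci_dev *hdev = f->private;
 *
 *           seq_printf(f, "0x%4.4x\n", hdev->manufacturer);
 *           return 0;
 *   }
 *
 *   static int example_open(struct inode *inode, struct file *file)
 *   {
 *           return single_open(file, example_show, inode->i_private);
 *   }
 *
 *   static const struct file_operations example_fops = {
 *           .open           = example_open,
 *           .read           = seq_read,
 *           .llseek         = seq_lseek,
 *           .release        = single_release,
 *   };
 */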
1808
Johan Hedberg42c6b122013-03-05 20:37:49 +02001809static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810{
1811 __u8 scan = opt;
1812
Johan Hedberg42c6b122013-03-05 20:37:49 +02001813 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
1815 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001816 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817}
1818
Johan Hedberg42c6b122013-03-05 20:37:49 +02001819static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820{
1821 __u8 auth = opt;
1822
Johan Hedberg42c6b122013-03-05 20:37:49 +02001823 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
1825 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001826 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827}
1828
Johan Hedberg42c6b122013-03-05 20:37:49 +02001829static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830{
1831 __u8 encrypt = opt;
1832
Johan Hedberg42c6b122013-03-05 20:37:49 +02001833 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001835 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001836 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837}
1838
Johan Hedberg42c6b122013-03-05 20:37:49 +02001839static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001840{
1841 __le16 policy = cpu_to_le16(opt);
1842
Johan Hedberg42c6b122013-03-05 20:37:49 +02001843 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001844
1845 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001846 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001847}
1848
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001849/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 * Device is held on return. */
1851struct hci_dev *hci_dev_get(int index)
1852{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001853 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854
1855 BT_DBG("%d", index);
1856
1857 if (index < 0)
1858 return NULL;
1859
1860 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001861 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 if (d->id == index) {
1863 hdev = hci_dev_hold(d);
1864 break;
1865 }
1866 }
1867 read_unlock(&hci_dev_list_lock);
1868 return hdev;
1869}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
1871/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001872
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001873bool hci_discovery_active(struct hci_dev *hdev)
1874{
1875 struct discovery_state *discov = &hdev->discovery;
1876
Andre Guedes6fbe1952012-02-03 17:47:58 -03001877 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001878 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001879 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001880 return true;
1881
Andre Guedes6fbe1952012-02-03 17:47:58 -03001882 default:
1883 return false;
1884 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001885}
1886
Johan Hedbergff9ef572012-01-04 14:23:45 +02001887void hci_discovery_set_state(struct hci_dev *hdev, int state)
1888{
1889 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1890
1891 if (hdev->discovery.state == state)
1892 return;
1893
1894 switch (state) {
1895 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001896 hci_update_background_scan(hdev);
1897
Andre Guedes7b99b652012-02-13 15:41:02 -03001898 if (hdev->discovery.state != DISCOVERY_STARTING)
1899 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001900 break;
1901 case DISCOVERY_STARTING:
1902 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001903 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001904 mgmt_discovering(hdev, 1);
1905 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001906 case DISCOVERY_RESOLVING:
1907 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001908 case DISCOVERY_STOPPING:
1909 break;
1910 }
1911
1912 hdev->discovery.state = state;
1913}
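/* The usual life cycle of a discovery run driven through mgmt is
 *
 *   STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * where RESOLVING is only entered when remote names still have to be
 * fetched after inquiry. As handled above, mgmt_discovering() notifies
 * user space on the transitions into FINDING and STOPPED.
 */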
1914
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001915void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
Johan Hedberg30883512012-01-04 14:16:21 +02001917 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001918 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
Johan Hedberg561aafb2012-01-04 13:31:59 +02001920 list_for_each_entry_safe(p, n, &cache->all, all) {
1921 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001922 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001924
1925 INIT_LIST_HEAD(&cache->unknown);
1926 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927}
1928
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001929struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1930 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931{
Johan Hedberg30883512012-01-04 14:16:21 +02001932 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 struct inquiry_entry *e;
1934
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001935 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Johan Hedberg561aafb2012-01-04 13:31:59 +02001937 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001939 return e;
1940 }
1941
1942 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943}
1944
Johan Hedberg561aafb2012-01-04 13:31:59 +02001945struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001946 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001947{
Johan Hedberg30883512012-01-04 14:16:21 +02001948 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001949 struct inquiry_entry *e;
1950
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001951 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001952
1953 list_for_each_entry(e, &cache->unknown, list) {
1954 if (!bacmp(&e->data.bdaddr, bdaddr))
1955 return e;
1956 }
1957
1958 return NULL;
1959}
1960
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001961struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001962 bdaddr_t *bdaddr,
1963 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001964{
1965 struct discovery_state *cache = &hdev->discovery;
1966 struct inquiry_entry *e;
1967
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001968 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001969
1970 list_for_each_entry(e, &cache->resolve, list) {
1971 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1972 return e;
1973 if (!bacmp(&e->data.bdaddr, bdaddr))
1974 return e;
1975 }
1976
1977 return NULL;
1978}
1979
Johan Hedberga3d4e202012-01-09 00:53:02 +02001980void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001981 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001982{
1983 struct discovery_state *cache = &hdev->discovery;
1984 struct list_head *pos = &cache->resolve;
1985 struct inquiry_entry *p;
1986
1987 list_del(&ie->list);
1988
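	/* Keep the resolve list sorted by ascending abs(rssi) so that the
	 * strongest devices get their names resolved first; entries whose
	 * resolution is already in flight (NAME_PENDING) keep their
	 * position.
	 */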
1989 list_for_each_entry(p, &cache->resolve, list) {
1990 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001991 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001992 break;
1993 pos = &p->list;
1994 }
1995
1996 list_add(&ie->list, pos);
1997}
1998
Johan Hedberg31754052012-01-04 13:39:52 +02001999bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002000 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001{
Johan Hedberg30883512012-01-04 14:16:21 +02002002 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002003 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002005 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
Szymon Janc2b2fec42012-11-20 11:38:54 +01002007 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2008
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002009 if (ssp)
2010 *ssp = data->ssp_mode;
2011
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002012 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002013 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002014 if (ie->data.ssp_mode && ssp)
2015 *ssp = true;
2016
Johan Hedberga3d4e202012-01-09 00:53:02 +02002017 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002018 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002019 ie->data.rssi = data->rssi;
2020 hci_inquiry_cache_update_resolve(hdev, ie);
2021 }
2022
Johan Hedberg561aafb2012-01-04 13:31:59 +02002023 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002024 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002025
Johan Hedberg561aafb2012-01-04 13:31:59 +02002026 /* Entry not in the cache. Add new one. */
2027 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2028 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002029 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002030
2031 list_add(&ie->all, &cache->all);
2032
2033 if (name_known) {
2034 ie->name_state = NAME_KNOWN;
2035 } else {
2036 ie->name_state = NAME_NOT_KNOWN;
2037 list_add(&ie->list, &cache->unknown);
2038 }
2039
2040update:
2041 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002042 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002043 ie->name_state = NAME_KNOWN;
2044 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 }
2046
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002047 memcpy(&ie->data, data, sizeof(*data));
2048 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002050
2051 if (ie->name_state == NAME_NOT_KNOWN)
2052 return false;
2053
2054 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055}
2056
2057static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2058{
Johan Hedberg30883512012-01-04 14:16:21 +02002059 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 struct inquiry_info *info = (struct inquiry_info *) buf;
2061 struct inquiry_entry *e;
2062 int copied = 0;
2063
Johan Hedberg561aafb2012-01-04 13:31:59 +02002064 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002066
2067 if (copied >= num)
2068 break;
2069
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 bacpy(&info->bdaddr, &data->bdaddr);
2071 info->pscan_rep_mode = data->pscan_rep_mode;
2072 info->pscan_period_mode = data->pscan_period_mode;
2073 info->pscan_mode = data->pscan_mode;
2074 memcpy(info->dev_class, data->dev_class, 3);
2075 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002078 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 }
2080
2081 BT_DBG("cache %p, copied %d", cache, copied);
2082 return copied;
2083}
2084
Johan Hedberg42c6b122013-03-05 20:37:49 +02002085static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086{
2087 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002088 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 struct hci_cp_inquiry cp;
2090
2091 BT_DBG("%s", hdev->name);
2092
2093 if (test_bit(HCI_INQUIRY, &hdev->flags))
2094 return;
2095
2096 /* Start Inquiry */
2097 memcpy(&cp.lap, &ir->lap, 3);
2098 cp.length = ir->length;
2099 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002100 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101}
2102
Andre Guedes3e13fa12013-03-27 20:04:56 -03002103static int wait_inquiry(void *word)
2104{
2105 schedule();
2106 return signal_pending(current);
2107}
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109int hci_inquiry(void __user *arg)
2110{
2111 __u8 __user *ptr = arg;
2112 struct hci_inquiry_req ir;
2113 struct hci_dev *hdev;
2114 int err = 0, do_inquiry = 0, max_rsp;
2115 long timeo;
2116 __u8 *buf;
2117
2118 if (copy_from_user(&ir, ptr, sizeof(ir)))
2119 return -EFAULT;
2120
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002121 hdev = hci_dev_get(ir.dev_id);
2122 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 return -ENODEV;
2124
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002125 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2126 err = -EBUSY;
2127 goto done;
2128 }
2129
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002130 if (hdev->dev_type != HCI_BREDR) {
2131 err = -EOPNOTSUPP;
2132 goto done;
2133 }
2134
Johan Hedberg56f87902013-10-02 13:43:13 +03002135 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2136 err = -EOPNOTSUPP;
2137 goto done;
2138 }
2139
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002140 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002141 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002142 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002143 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 do_inquiry = 1;
2145 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002146 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Marcel Holtmann04837f62006-07-03 10:02:33 +02002148 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002149
2150 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002151 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2152 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002153 if (err < 0)
2154 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002155
2156 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2157 * cleared). If it is interrupted by a signal, return -EINTR.
2158 */
2159 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2160 TASK_INTERRUPTIBLE))
2161 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002164 /* For an unlimited number of responses we will use a buffer with
2165 * 255 entries.
2166 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2168
2169 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2170 * and then copy it to user space.
2171 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002172 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002173 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 err = -ENOMEM;
2175 goto done;
2176 }
2177
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002178 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002180 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
2182 BT_DBG("num_rsp %d", ir.num_rsp);
2183
2184 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2185 ptr += sizeof(ir);
2186 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002187 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002189 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 err = -EFAULT;
2191
2192 kfree(buf);
2193
2194done:
2195 hci_dev_put(hdev);
2196 return err;
2197}
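/* User space reaches hci_inquiry() through the HCIINQUIRY ioctl on a
 * raw HCI socket. A minimal sketch of a caller, with error handling
 * omitted and the GIAC inquiry access code 0x9e8b33 assumed
 * (illustration only):
 *
 *   int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *   struct hci_inquiry_req *ir;
 *
 *   ir = calloc(1, sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *   ir->dev_id  = 0;
 *   ir->lap[0]  = 0x33;
 *   ir->lap[1]  = 0x8b;
 *   ir->lap[2]  = 0x9e;
 *   ir->length  = 8;                    (8 * 1.28 seconds)
 *   ir->num_rsp = 255;                  (0 would mean unlimited, capped at 255)
 *   ir->flags   = IREQ_CACHE_FLUSH;
 *
 *   if (ioctl(dd, HCIINQUIRY, ir) < 0)
 *           perror("HCIINQUIRY");
 *
 * On return the kernel has updated ir->num_rsp and placed that many
 * struct inquiry_info records directly after the request header,
 * matching the copy_to_user() calls above.
 */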
2198
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002199static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 int ret = 0;
2202
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 BT_DBG("%s %p", hdev->name, hdev);
2204
2205 hci_req_lock(hdev);
2206
Johan Hovold94324962012-03-15 14:48:41 +01002207 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2208 ret = -ENODEV;
2209 goto done;
2210 }
2211
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002212 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2213 /* Check for rfkill but allow the HCI setup stage to
2214 * proceed (which in itself doesn't cause any RF activity).
2215 */
2216 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2217 ret = -ERFKILL;
2218 goto done;
2219 }
2220
2221 /* Check for valid public address or a configured static
2222 * random address, but let the HCI setup proceed to
2223 * be able to determine if there is a public address
2224 * or not.
2225 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002226 * In case of user channel usage, it is not important
2227 * if a public address or static random address is
2228 * available.
2229 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002230 * This check is only valid for BR/EDR controllers
2231 * since AMP controllers do not have an address.
2232 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002233 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2234 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002235 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2236 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2237 ret = -EADDRNOTAVAIL;
2238 goto done;
2239 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002240 }
2241
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 if (test_bit(HCI_UP, &hdev->flags)) {
2243 ret = -EALREADY;
2244 goto done;
2245 }
2246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 if (hdev->open(hdev)) {
2248 ret = -EIO;
2249 goto done;
2250 }
2251
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002252 atomic_set(&hdev->cmd_cnt, 1);
2253 set_bit(HCI_INIT, &hdev->flags);
2254
2255 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2256 ret = hdev->setup(hdev);
2257
2258 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002259 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2260 set_bit(HCI_RAW, &hdev->flags);
2261
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002262 if (!test_bit(HCI_RAW, &hdev->flags) &&
2263 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002264 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 }
2266
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002267 clear_bit(HCI_INIT, &hdev->flags);
2268
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 if (!ret) {
2270 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002271 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 set_bit(HCI_UP, &hdev->flags);
2273 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002274 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002275 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002276 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002277 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002278 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002279 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002280 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002281 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002283 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002284 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002285 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
2287 skb_queue_purge(&hdev->cmd_q);
2288 skb_queue_purge(&hdev->rx_q);
2289
2290 if (hdev->flush)
2291 hdev->flush(hdev);
2292
2293 if (hdev->sent_cmd) {
2294 kfree_skb(hdev->sent_cmd);
2295 hdev->sent_cmd = NULL;
2296 }
2297
2298 hdev->close(hdev);
2299 hdev->flags = 0;
2300 }
2301
2302done:
2303 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 return ret;
2305}
2306
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002307/* ---- HCI ioctl helpers ---- */
2308
2309int hci_dev_open(__u16 dev)
2310{
2311 struct hci_dev *hdev;
2312 int err;
2313
2314 hdev = hci_dev_get(dev);
2315 if (!hdev)
2316 return -ENODEV;
2317
Johan Hedberge1d08f42013-10-01 22:44:50 +03002318 /* We need to ensure that no other power on/off work is pending
2319 * before proceeding to call hci_dev_do_open. This is
2320 * particularly important if the setup procedure has not yet
2321 * completed.
2322 */
2323 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2324 cancel_delayed_work(&hdev->power_off);
2325
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002326 /* After this call it is guaranteed that the setup procedure
2327 * has finished. This means that error conditions like RFKILL
2328 * or no valid public or static random address apply.
2329 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002330 flush_workqueue(hdev->req_workqueue);
2331
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002332 err = hci_dev_do_open(hdev);
2333
2334 hci_dev_put(hdev);
2335
2336 return err;
2337}
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339static int hci_dev_do_close(struct hci_dev *hdev)
2340{
2341 BT_DBG("%s %p", hdev->name, hdev);
2342
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002343 cancel_delayed_work(&hdev->power_off);
2344
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 hci_req_cancel(hdev, ENODEV);
2346 hci_req_lock(hdev);
2347
2348 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002349 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 hci_req_unlock(hdev);
2351 return 0;
2352 }
2353
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002354 /* Flush RX and TX works */
2355 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002356 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002358 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002359 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002360 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002361 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002362 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002363 }
2364
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002365 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002366 cancel_delayed_work(&hdev->service_cache);
2367
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002368 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002369
2370 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2371 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002372
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002373 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002374 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002376 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002377 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
2379 hci_notify(hdev, HCI_DEV_DOWN);
2380
2381 if (hdev->flush)
2382 hdev->flush(hdev);
2383
2384 /* Reset device */
2385 skb_queue_purge(&hdev->cmd_q);
2386 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002387 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002388 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002389 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002391 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 clear_bit(HCI_INIT, &hdev->flags);
2393 }
2394
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002395 /* flush cmd work */
2396 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397
2398 /* Drop queues */
2399 skb_queue_purge(&hdev->rx_q);
2400 skb_queue_purge(&hdev->cmd_q);
2401 skb_queue_purge(&hdev->raw_q);
2402
2403 /* Drop last sent command */
2404 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002405 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 kfree_skb(hdev->sent_cmd);
2407 hdev->sent_cmd = NULL;
2408 }
2409
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002410 kfree_skb(hdev->recv_evt);
2411 hdev->recv_evt = NULL;
2412
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 /* After this point our queues are empty
2414 * and no tasks are scheduled. */
2415 hdev->close(hdev);
2416
Johan Hedberg35b973c2013-03-15 17:06:59 -05002417 /* Clear flags */
2418 hdev->flags = 0;
2419 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2420
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002421 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2422 if (hdev->dev_type == HCI_BREDR) {
2423 hci_dev_lock(hdev);
2424 mgmt_powered(hdev, 0);
2425 hci_dev_unlock(hdev);
2426 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002427 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002428
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002429 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002430 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002431
Johan Hedberge59fda82012-02-22 18:11:53 +02002432 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002433 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002434 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002435
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 hci_req_unlock(hdev);
2437
2438 hci_dev_put(hdev);
2439 return 0;
2440}
2441
2442int hci_dev_close(__u16 dev)
2443{
2444 struct hci_dev *hdev;
2445 int err;
2446
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002447 hdev = hci_dev_get(dev);
2448 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002450
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002451 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2452 err = -EBUSY;
2453 goto done;
2454 }
2455
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002456 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2457 cancel_delayed_work(&hdev->power_off);
2458
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002460
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002461done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 hci_dev_put(hdev);
2463 return err;
2464}
2465
2466int hci_dev_reset(__u16 dev)
2467{
2468 struct hci_dev *hdev;
2469 int ret = 0;
2470
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002471 hdev = hci_dev_get(dev);
2472 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 return -ENODEV;
2474
2475 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
Marcel Holtmann808a0492013-08-26 20:57:58 -07002477 if (!test_bit(HCI_UP, &hdev->flags)) {
2478 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002480 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002482 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2483 ret = -EBUSY;
2484 goto done;
2485 }
2486
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 /* Drop queues */
2488 skb_queue_purge(&hdev->rx_q);
2489 skb_queue_purge(&hdev->cmd_q);
2490
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002491 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002492 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002494 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
2496 if (hdev->flush)
2497 hdev->flush(hdev);
2498
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002499 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002500 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501
2502 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002503 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
2505done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 hci_req_unlock(hdev);
2507 hci_dev_put(hdev);
2508 return ret;
2509}
2510
2511int hci_dev_reset_stat(__u16 dev)
2512{
2513 struct hci_dev *hdev;
2514 int ret = 0;
2515
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002516 hdev = hci_dev_get(dev);
2517 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 return -ENODEV;
2519
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002520 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2521 ret = -EBUSY;
2522 goto done;
2523 }
2524
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2526
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002527done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 return ret;
2530}
2531
2532int hci_dev_cmd(unsigned int cmd, void __user *arg)
2533{
2534 struct hci_dev *hdev;
2535 struct hci_dev_req dr;
2536 int err = 0;
2537
2538 if (copy_from_user(&dr, arg, sizeof(dr)))
2539 return -EFAULT;
2540
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002541 hdev = hci_dev_get(dr.dev_id);
2542 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 return -ENODEV;
2544
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002545 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2546 err = -EBUSY;
2547 goto done;
2548 }
2549
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002550 if (hdev->dev_type != HCI_BREDR) {
2551 err = -EOPNOTSUPP;
2552 goto done;
2553 }
2554
Johan Hedberg56f87902013-10-02 13:43:13 +03002555 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2556 err = -EOPNOTSUPP;
2557 goto done;
2558 }
2559
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 switch (cmd) {
2561 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002562 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2563 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 break;
2565
2566 case HCISETENCRYPT:
2567 if (!lmp_encrypt_capable(hdev)) {
2568 err = -EOPNOTSUPP;
2569 break;
2570 }
2571
2572 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2573 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002574 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2575 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 if (err)
2577 break;
2578 }
2579
Johan Hedberg01178cd2013-03-05 20:37:41 +02002580 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2581 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 break;
2583
2584 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002585 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2586 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 break;
2588
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002589 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002590 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2591 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002592 break;
2593
2594 case HCISETLINKMODE:
2595 hdev->link_mode = ((__u16) dr.dev_opt) &
2596 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2597 break;
2598
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 case HCISETPTYPE:
2600 hdev->pkt_type = (__u16) dr.dev_opt;
2601 break;
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002604 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2605 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 break;
2607
2608 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002609 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2610 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 break;
2612
2613 default:
2614 err = -EINVAL;
2615 break;
2616 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002617
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002618done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 hci_dev_put(hdev);
2620 return err;
2621}
2622
2623int hci_get_dev_list(void __user *arg)
2624{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002625 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 struct hci_dev_list_req *dl;
2627 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 int n = 0, size, err;
2629 __u16 dev_num;
2630
2631 if (get_user(dev_num, (__u16 __user *) arg))
2632 return -EFAULT;
2633
2634 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2635 return -EINVAL;
2636
2637 size = sizeof(*dl) + dev_num * sizeof(*dr);
2638
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002639 dl = kzalloc(size, GFP_KERNEL);
2640 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 return -ENOMEM;
2642
2643 dr = dl->dev_req;
2644
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002645 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002646 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002647 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002648 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002649
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002650 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2651 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 (dr + n)->dev_id = hdev->id;
2654 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002655
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 if (++n >= dev_num)
2657 break;
2658 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002659 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660
2661 dl->dev_num = n;
2662 size = sizeof(*dl) + n * sizeof(*dr);
2663
2664 err = copy_to_user(arg, dl, size);
2665 kfree(dl);
2666
2667 return err ? -EFAULT : 0;
2668}
2669
2670int hci_get_dev_info(void __user *arg)
2671{
2672 struct hci_dev *hdev;
2673 struct hci_dev_info di;
2674 int err = 0;
2675
2676 if (copy_from_user(&di, arg, sizeof(di)))
2677 return -EFAULT;
2678
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002679 hdev = hci_dev_get(di.dev_id);
2680 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 return -ENODEV;
2682
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002683 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002684 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002685
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002686 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2687 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002688
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 strcpy(di.name, hdev->name);
2690 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002691 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 di.flags = hdev->flags;
2693 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002694 if (lmp_bredr_capable(hdev)) {
2695 di.acl_mtu = hdev->acl_mtu;
2696 di.acl_pkts = hdev->acl_pkts;
2697 di.sco_mtu = hdev->sco_mtu;
2698 di.sco_pkts = hdev->sco_pkts;
2699 } else {
2700 di.acl_mtu = hdev->le_mtu;
2701 di.acl_pkts = hdev->le_pkts;
2702 di.sco_mtu = 0;
2703 di.sco_pkts = 0;
2704 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 di.link_policy = hdev->link_policy;
2706 di.link_mode = hdev->link_mode;
2707
2708 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2709 memcpy(&di.features, &hdev->features, sizeof(di.features));
2710
2711 if (copy_to_user(arg, &di, sizeof(di)))
2712 err = -EFAULT;
2713
2714 hci_dev_put(hdev);
2715
2716 return err;
2717}
2718
2719/* ---- Interface to HCI drivers ---- */
2720
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002721static int hci_rfkill_set_block(void *data, bool blocked)
2722{
2723 struct hci_dev *hdev = data;
2724
2725 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2726
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002727 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2728 return -EBUSY;
2729
Johan Hedberg5e130362013-09-13 08:58:17 +03002730 if (blocked) {
2731 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002732 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2733 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002734 } else {
2735 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002736 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002737
2738 return 0;
2739}
2740
2741static const struct rfkill_ops hci_rfkill_ops = {
2742 .set_block = hci_rfkill_set_block,
2743};
2744
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002745static void hci_power_on(struct work_struct *work)
2746{
2747 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002748 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002749
2750 BT_DBG("%s", hdev->name);
2751
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002752 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002753 if (err < 0) {
2754 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002755 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002756 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002757
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002758 /* During the HCI setup phase, a few error conditions are
2759 * ignored and they need to be checked now. If they are still
2760 * valid, it is important to turn the device back off.
2761 */
2762 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2763 (hdev->dev_type == HCI_BREDR &&
2764 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2765 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002766 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2767 hci_dev_do_close(hdev);
2768 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002769 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2770 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002771 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002772
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002773 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002774 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002775}
2776
2777static void hci_power_off(struct work_struct *work)
2778{
Johan Hedberg32435532011-11-07 22:16:04 +02002779 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002780 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002781
2782 BT_DBG("%s", hdev->name);
2783
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002784 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002785}
2786
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002787static void hci_discov_off(struct work_struct *work)
2788{
2789 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002790
2791 hdev = container_of(work, struct hci_dev, discov_off.work);
2792
2793 BT_DBG("%s", hdev->name);
2794
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002795 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002796}
2797
Johan Hedberg35f74982014-02-18 17:14:32 +02002798void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002799{
Johan Hedberg48210022013-01-27 00:31:28 +02002800 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002801
Johan Hedberg48210022013-01-27 00:31:28 +02002802 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2803 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002804 kfree(uuid);
2805 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002806}
2807
Johan Hedberg35f74982014-02-18 17:14:32 +02002808void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002809{
2810 struct list_head *p, *n;
2811
2812 list_for_each_safe(p, n, &hdev->link_keys) {
2813 struct link_key *key;
2814
2815 key = list_entry(p, struct link_key, list);
2816
2817 list_del(p);
2818 kfree(key);
2819 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002820}
2821
Johan Hedberg35f74982014-02-18 17:14:32 +02002822void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002823{
2824 struct smp_ltk *k, *tmp;
2825
2826 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2827 list_del(&k->list);
2828 kfree(k);
2829 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002830}
2831
Johan Hedberg970c4e42014-02-18 10:19:33 +02002832void hci_smp_irks_clear(struct hci_dev *hdev)
2833{
2834 struct smp_irk *k, *tmp;
2835
2836 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2837 list_del(&k->list);
2838 kfree(k);
2839 }
2840}
2841
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002842struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2843{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002844 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002845
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002846 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002847 if (bacmp(bdaddr, &k->bdaddr) == 0)
2848 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002849
2850 return NULL;
2851}
2852
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302853static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002854 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002855{
2856 /* Legacy key */
2857 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302858 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002859
2860 /* Debug keys are insecure so don't store them persistently */
2861 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302862 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002863
2864 /* Changed combination key and there's no previous one */
2865 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302866 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002867
2868 /* Security mode 3 case */
2869 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302870 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002871
2872 /* Neither local nor remote side had no-bonding as a requirement */
2873 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302874 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002875
2876 /* Local side had dedicated bonding as requirement */
2877 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302878 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002879
2880 /* Remote side had dedicated bonding as requirement */
2881 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302882 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002883
2884 /* If none of the above criteria match, then don't store the key
2885 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302886 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002887}
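/* For reading the checks above, the relevant HCI constants are: link
 * key types 0x00-0x02 are the legacy combination/local/remote unit
 * keys, 0x03 is the debug combination key and 0x04 and up are Secure
 * Simple Pairing key types; auth_type/remote_auth values 0x00/0x01
 * mean "no bonding", 0x02/0x03 "dedicated bonding" and 0x04/0x05
 * "general bonding", with the odd values additionally requiring MITM
 * protection.
 */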
2888
Johan Hedberg98a0b842014-01-30 19:40:00 -08002889static bool ltk_type_master(u8 type)
2890{
2891 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2892 return true;
2893
2894 return false;
2895}
2896
2897struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2898 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002899{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002900 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002901
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002902 list_for_each_entry(k, &hdev->long_term_keys, list) {
2903 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002904 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002905 continue;
2906
Johan Hedberg98a0b842014-01-30 19:40:00 -08002907 if (ltk_type_master(k->type) != master)
2908 continue;
2909
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002910 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002911 }
2912
2913 return NULL;
2914}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002915
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002916struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002917 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002918{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002919 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002920
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002921 list_for_each_entry(k, &hdev->long_term_keys, list)
2922 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002923 bacmp(bdaddr, &k->bdaddr) == 0 &&
2924 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002925 return k;
2926
2927 return NULL;
2928}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002929
Johan Hedberg970c4e42014-02-18 10:19:33 +02002930struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2931{
2932 struct smp_irk *irk;
2933
2934 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2935 if (!bacmp(&irk->rpa, rpa))
2936 return irk;
2937 }
2938
2939 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2940 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2941 bacpy(&irk->rpa, rpa);
2942 return irk;
2943 }
2944 }
2945
2946 return NULL;
2947}
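
/* RPA resolution above runs in two passes: a cheap scan over RPAs that
 * were already resolved and cached, then a cryptographic pass that runs
 * smp_irk_matches() against every stored IRK and caches a hit in
 * irk->rpa so the next lookup takes the fast path.
 */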
2948
2949struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2950 u8 addr_type)
2951{
2952 struct smp_irk *irk;
2953
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002954 /* Identity Address must be public or static random */
2955 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2956 return NULL;
2957
Johan Hedberg970c4e42014-02-18 10:19:33 +02002958 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2959 if (addr_type == irk->addr_type &&
2960 bacmp(bdaddr, &irk->bdaddr) == 0)
2961 return irk;
2962 }
2963
2964 return NULL;
2965}
2966
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002967int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002968 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002969{
2970 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302971 u8 old_key_type;
2972 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002973
2974 old_key = hci_find_link_key(hdev, bdaddr);
2975 if (old_key) {
2976 old_key_type = old_key->type;
2977 key = old_key;
2978 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002979 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002980 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002981 if (!key)
2982 return -ENOMEM;
2983 list_add(&key->list, &hdev->link_keys);
2984 }
2985
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002986 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002987
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002988 /* Some buggy controller combinations generate a changed
2989 * combination key for legacy pairing even when there's no
2990 * previous key */
2991 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002992 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002993 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002994 if (conn)
2995 conn->key_type = type;
2996 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002997
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002998 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002999 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003000 key->pin_len = pin_len;
3001
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003002 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003003 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003004 else
3005 key->type = type;
3006
Johan Hedberg4df378a2011-04-28 11:29:03 -07003007 if (!new_key)
3008 return 0;
3009
3010 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3011
Johan Hedberg744cf192011-11-08 20:40:14 +02003012 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003013
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05303014 if (conn)
3015 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003016
3017 return 0;
3018}
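
/* Usage sketch (hypothetical caller; the real event handlers are
 * assumed to hold hdev->lock around this):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_link_key(hdev, conn, 1, &bdaddr, link_key,
 *			       HCI_LK_COMBINATION, pin_len);
 *	hci_dev_unlock(hdev);
 */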
3019
Johan Hedbergca9142b2014-02-19 14:57:44 +02003020struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003021 u8 addr_type, u8 type, u8 authenticated,
3022 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003023{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003024 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003025 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003026
Johan Hedberg98a0b842014-01-30 19:40:00 -08003027 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003028 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003029 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003030 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003031 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003032 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003033 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003034 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003035 }
3036
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003037 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003038 key->bdaddr_type = addr_type;
3039 memcpy(key->val, tk, sizeof(key->val));
3040 key->authenticated = authenticated;
3041 key->ediv = ediv;
3042 key->enc_size = enc_size;
3043 key->type = type;
3044 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003045
Johan Hedbergca9142b2014-02-19 14:57:44 +02003046 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003047}
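
/* Upsert semantics: an existing entry with the same (bdaddr, addr_type,
 * master role) is rewritten in place, otherwise a new one is allocated.
 * Illustrative call (argument values hypothetical):
 *
 *	ltk = hci_add_ltk(hdev, &bdaddr, ADDR_LE_DEV_RANDOM, HCI_SMP_LTK,
 *			  authenticated, tk, enc_size, ediv, rand);
 */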
3048
Johan Hedbergca9142b2014-02-19 14:57:44 +02003049struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3050 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003051{
3052 struct smp_irk *irk;
3053
3054 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3055 if (!irk) {
3056 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3057 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003058 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003059
3060 bacpy(&irk->bdaddr, bdaddr);
3061 irk->addr_type = addr_type;
3062
3063 list_add(&irk->list, &hdev->identity_resolving_keys);
3064 }
3065
3066 memcpy(irk->val, val, 16);
3067 bacpy(&irk->rpa, rpa);
3068
Johan Hedbergca9142b2014-02-19 14:57:44 +02003069 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003070}
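
/* Like hci_add_ltk() this is an upsert keyed on (bdaddr, addr_type);
 * the IRK value and last-seen RPA are refreshed on every call, e.g.
 *
 *	irk = hci_add_irk(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, val, &rpa);
 */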
3071
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003072int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3073{
3074 struct link_key *key;
3075
3076 key = hci_find_link_key(hdev, bdaddr);
3077 if (!key)
3078 return -ENOENT;
3079
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003080 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003081
3082 list_del(&key->list);
3083 kfree(key);
3084
3085 return 0;
3086}
3087
Johan Hedberge0b2b272014-02-18 17:14:31 +02003088int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003089{
3090 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003091 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003092
3093 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003094 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003095 continue;
3096
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003097 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003098
3099 list_del(&k->list);
3100 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003101 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003102 }
3103
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003104 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003105}
3106
Johan Hedberga7ec7332014-02-18 17:14:35 +02003107void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3108{
3109 struct smp_irk *k, *tmp;
3110
Johan Hedberg668b7b12014-02-21 16:03:31 +02003111 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003112 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3113 continue;
3114
3115 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3116
3117 list_del(&k->list);
3118 kfree(k);
3119 }
3120}
3121
Ville Tervo6bd32322011-02-16 16:32:41 +02003122/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003123static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02003124{
3125 struct hci_dev *hdev = (void *) arg;
3126
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003127 if (hdev->sent_cmd) {
3128 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3129 u16 opcode = __le16_to_cpu(sent->opcode);
3130
3131 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3132 } else {
3133 BT_ERR("%s command tx timeout", hdev->name);
3134 }
3135
Ville Tervo6bd32322011-02-16 16:32:41 +02003136 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003137 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003138}
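
/* This handler fires when a sent command never completes. Resetting
 * cmd_cnt to 1 unblocks the command queue and hci_cmd_work() transmits
 * the next queued command. The timer itself is assumed to be armed
 * elsewhere in this file whenever a command is actually sent.
 */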
3139
Szymon Janc2763eda2011-03-22 13:12:22 +01003140struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003141 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003142{
3143 struct oob_data *data;
3144
3145 list_for_each_entry(data, &hdev->remote_oob_data, list)
3146 if (bacmp(bdaddr, &data->bdaddr) == 0)
3147 return data;
3148
3149 return NULL;
3150}
3151
3152int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3153{
3154 struct oob_data *data;
3155
3156 data = hci_find_remote_oob_data(hdev, bdaddr);
3157 if (!data)
3158 return -ENOENT;
3159
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003160 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003161
3162 list_del(&data->list);
3163 kfree(data);
3164
3165 return 0;
3166}
3167
Johan Hedberg35f74982014-02-18 17:14:32 +02003168void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003169{
3170 struct oob_data *data, *n;
3171
3172 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3173 list_del(&data->list);
3174 kfree(data);
3175 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003176}
3177
Marcel Holtmann07988722014-01-10 02:07:29 -08003178int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3179 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003180{
3181 struct oob_data *data;
3182
3183 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003184 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003185 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003186 if (!data)
3187 return -ENOMEM;
3188
3189 bacpy(&data->bdaddr, bdaddr);
3190 list_add(&data->list, &hdev->remote_oob_data);
3191 }
3192
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003193 memcpy(data->hash192, hash, sizeof(data->hash192));
3194 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003195
Marcel Holtmann07988722014-01-10 02:07:29 -08003196 memset(data->hash256, 0, sizeof(data->hash256));
3197 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3198
3199 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3200
3201 return 0;
3202}
3203
3204int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3205 u8 *hash192, u8 *randomizer192,
3206 u8 *hash256, u8 *randomizer256)
3207{
3208 struct oob_data *data;
3209
3210 data = hci_find_remote_oob_data(hdev, bdaddr);
3211 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003212 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003213 if (!data)
3214 return -ENOMEM;
3215
3216 bacpy(&data->bdaddr, bdaddr);
3217 list_add(&data->list, &hdev->remote_oob_data);
3218 }
3219
3220 memcpy(data->hash192, hash192, sizeof(data->hash192));
3221 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3222
3223 memcpy(data->hash256, hash256, sizeof(data->hash256));
3224 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3225
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003226 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003227
3228 return 0;
3229}
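
/* The plain variant above stores P-192 data only and zeroes the P-256
 * fields, while the _ext variant keeps both, presumably for Secure
 * Connections capable pairings. Illustrative call:
 *
 *	err = hci_add_remote_oob_ext_data(hdev, &bdaddr, hash192, rand192,
 *					  hash256, rand256);
 */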
3230
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003231struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3232 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003233{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003234 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003235
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003236 list_for_each_entry(b, &hdev->blacklist, list) {
3237 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003238 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003239 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003240
3241 return NULL;
3242}
3243
Marcel Holtmannc9507492014-02-27 19:35:54 -08003244static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003245{
3246 struct list_head *p, *n;
3247
3248 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003249 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003250
3251 list_del(p);
3252 kfree(b);
3253 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003254}
3255
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003256int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003257{
3258 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003259
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003260 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003261 return -EBADF;
3262
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003263 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003264 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003265
3266 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003267 if (!entry)
3268 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003269
3270 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003271 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003272
3273 list_add(&entry->list, &hdev->blacklist);
3274
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003275 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003276}
3277
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003278int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003279{
3280 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003281
Johan Hedberg35f74982014-02-18 17:14:32 +02003282 if (!bacmp(bdaddr, BDADDR_ANY)) {
3283 hci_blacklist_clear(hdev);
3284 return 0;
3285 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003286
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003287 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003288 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003289 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003290
3291 list_del(&entry->list);
3292 kfree(entry);
3293
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003294 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003295}
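
/* Usage sketch for blocking and unblocking a device (illustrative;
 * callers are assumed to hold hdev->lock):
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 */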
3296
Andre Guedes15819a72014-02-03 13:56:18 -03003297/* This function requires the caller holds hdev->lock */
3298struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3299 bdaddr_t *addr, u8 addr_type)
3300{
3301 struct hci_conn_params *params;
3302
3303 list_for_each_entry(params, &hdev->le_conn_params, list) {
3304 if (bacmp(&params->addr, addr) == 0 &&
3305 params->addr_type == addr_type) {
3306 return params;
3307 }
3308 }
3309
3310 return NULL;
3311}
3312
Andre Guedescef952c2014-02-26 20:21:49 -03003313static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3314{
3315 struct hci_conn *conn;
3316
3317 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3318 if (!conn)
3319 return false;
3320
3321 if (conn->dst_type != type)
3322 return false;
3323
3324 if (conn->state != BT_CONNECTED)
3325 return false;
3326
3327 return true;
3328}
3329
Andre Guedesa9b0a042014-02-26 20:21:52 -03003330static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3331{
3332 if (addr_type == ADDR_LE_DEV_PUBLIC)
3333 return true;
3334
3335 /* Check for Random Static address type */
3336 if ((addr->b[5] & 0xc0) == 0xc0)
3337 return true;
3338
3339 return false;
3340}
3341
Andre Guedes15819a72014-02-03 13:56:18 -03003342/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003343int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3344 u8 auto_connect, u16 conn_min_interval,
3345 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003346{
3347 struct hci_conn_params *params;
3348
Andre Guedesa9b0a042014-02-26 20:21:52 -03003349 if (!is_identity_address(addr, addr_type))
3350 return -EINVAL;
3351
Andre Guedes15819a72014-02-03 13:56:18 -03003352 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003353 if (params)
3354 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003355
3356 params = kzalloc(sizeof(*params), GFP_KERNEL);
3357 if (!params) {
3358 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003359 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003360 }
3361
3362 bacpy(&params->addr, addr);
3363 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003364
3365 list_add(&params->list, &hdev->le_conn_params);
3366
3367update:
Andre Guedes15819a72014-02-03 13:56:18 -03003368 params->conn_min_interval = conn_min_interval;
3369 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003370 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003371
Andre Guedescef952c2014-02-26 20:21:49 -03003372 switch (auto_connect) {
3373 case HCI_AUTO_CONN_DISABLED:
3374 case HCI_AUTO_CONN_LINK_LOSS:
3375 hci_pend_le_conn_del(hdev, addr, addr_type);
3376 break;
3377 case HCI_AUTO_CONN_ALWAYS:
3378 if (!is_connected(hdev, addr, addr_type))
3379 hci_pend_le_conn_add(hdev, addr, addr_type);
3380 break;
3381 }
Andre Guedes15819a72014-02-03 13:56:18 -03003382
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003383 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3384 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3385 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003386
3387 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003388}
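
/* Usage sketch (illustrative; hdev->lock must be held as noted above,
 * and the interval values are simply the hdev defaults):
 *
 *	err = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 */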
3389
3390/* This function requires the caller holds hdev->lock */
3391void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3392{
3393 struct hci_conn_params *params;
3394
3395 params = hci_conn_params_lookup(hdev, addr, addr_type);
3396 if (!params)
3397 return;
3398
Andre Guedescef952c2014-02-26 20:21:49 -03003399 hci_pend_le_conn_del(hdev, addr, addr_type);
3400
Andre Guedes15819a72014-02-03 13:56:18 -03003401 list_del(&params->list);
3402 kfree(params);
3403
3404 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3405}
3406
3407/* This function requires the caller holds hdev->lock */
3408void hci_conn_params_clear(struct hci_dev *hdev)
3409{
3410 struct hci_conn_params *params, *tmp;
3411
3412 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3413 list_del(&params->list);
3414 kfree(params);
3415 }
3416
3417 BT_DBG("All LE connection parameters were removed");
3418}
3419
Andre Guedes77a77a32014-02-26 20:21:46 -03003420/* This function requires the caller holds hdev->lock */
3421struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3422 bdaddr_t *addr, u8 addr_type)
3423{
3424 struct bdaddr_list *entry;
3425
3426 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3427 if (bacmp(&entry->bdaddr, addr) == 0 &&
3428 entry->bdaddr_type == addr_type)
3429 return entry;
3430 }
3431
3432 return NULL;
3433}
3434
3435/* This function requires the caller holds hdev->lock */
3436void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3437{
3438 struct bdaddr_list *entry;
3439
3440 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3441 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003442 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003443
3444 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3445 if (!entry) {
3446 BT_ERR("Out of memory");
3447 return;
3448 }
3449
3450 bacpy(&entry->bdaddr, addr);
3451 entry->bdaddr_type = addr_type;
3452
3453 list_add(&entry->list, &hdev->pend_le_conns);
3454
3455 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003456
3457done:
3458 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003459}
3460
3461/* This function requires the caller holds hdev->lock */
3462void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3463{
3464 struct bdaddr_list *entry;
3465
3466 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3467 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003468 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003469
3470 list_del(&entry->list);
3471 kfree(entry);
3472
3473 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003474
3475done:
3476 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003477}
3478
3479/* This function requires the caller holds hdev->lock */
3480void hci_pend_le_conns_clear(struct hci_dev *hdev)
3481{
3482 struct bdaddr_list *entry, *tmp;
3483
3484 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3485 list_del(&entry->list);
3486 kfree(entry);
3487 }
3488
3489 BT_DBG("All LE pending connections cleared");
3490}
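
/* The pend_le_conns list drives passive background scanning: every add
 * or delete above funnels into hci_update_background_scan(), which is
 * expected to start or stop LE scanning to match the pending set.
 */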
3491
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003492static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003493{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003494 if (status) {
3495 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003496
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003497 hci_dev_lock(hdev);
3498 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3499 hci_dev_unlock(hdev);
3500 return;
3501 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003502}
3503
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003504static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003505{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003506 /* General inquiry access code (GIAC) */
3507 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3508 struct hci_request req;
3509 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003510 int err;
3511
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003512 if (status) {
3513 BT_ERR("Failed to disable LE scanning: status %d", status);
3514 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003515 }
3516
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003517 switch (hdev->discovery.type) {
3518 case DISCOV_TYPE_LE:
3519 hci_dev_lock(hdev);
3520 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3521 hci_dev_unlock(hdev);
3522 break;
3523
3524 case DISCOV_TYPE_INTERLEAVED:
3525 hci_req_init(&req, hdev);
3526
3527 memset(&cp, 0, sizeof(cp));
3528 memcpy(&cp.lap, lap, sizeof(cp.lap));
3529 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3530 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3531
3532 hci_dev_lock(hdev);
3533
3534 hci_inquiry_cache_flush(hdev);
3535
3536 err = hci_req_run(&req, inquiry_complete);
3537 if (err) {
3538 BT_ERR("Inquiry request failed: err %d", err);
3539 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3540 }
3541
3542 hci_dev_unlock(hdev);
3543 break;
3544 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003545}
3546
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003547static void le_scan_disable_work(struct work_struct *work)
3548{
3549 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003550 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003551 struct hci_request req;
3552 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003553
3554 BT_DBG("%s", hdev->name);
3555
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003556 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003557
Andre Guedesb1efcc22014-02-26 20:21:40 -03003558 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003559
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003560 err = hci_req_run(&req, le_scan_disable_work_complete);
3561 if (err)
3562 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003563}
3564
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003565int hci_update_random_address(struct hci_request *req, bool require_privacy,
3566 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003567{
3568 struct hci_dev *hdev = req->hdev;
3569 int err;
3570
3571	/* If privacy is enabled, use a resolvable private address. If the
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003572	 * current RPA has expired, or something other than the current RPA
3573	 * is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003574 */
3575 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003576 int to;
3577
3578 *own_addr_type = ADDR_LE_DEV_RANDOM;
3579
3580 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003581 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003582 return 0;
3583
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003584 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003585 if (err < 0) {
3586 BT_ERR("%s failed to generate new RPA", hdev->name);
3587 return err;
3588 }
3589
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003590 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003591
3592 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3593 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3594
3595 return 0;
3596 }
3597
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003598	/* If privacy is required but a resolvable private address cannot
3599	 * be used, fall back to an unresolvable private address. This is
3600	 * useful for active scanning and non-connectable advertising.
3601 */
3602 if (require_privacy) {
3603 bdaddr_t urpa;
3604
3605 get_random_bytes(&urpa, 6);
3606 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3607
3608 *own_addr_type = ADDR_LE_DEV_RANDOM;
3609 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3610 return 0;
3611 }
3612
Johan Hedbergebd3a742014-02-23 19:42:21 +02003613	/* If forcing static address is in use or there is no public
3614	 * address, use the static address as the random address (but skip
3615	 * the HCI command if the current random address is already the
3616	 * static one).
3617 */
3618 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3619 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3620 *own_addr_type = ADDR_LE_DEV_RANDOM;
3621 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3622 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3623 &hdev->static_addr);
3624 return 0;
3625 }
3626
3627 /* Neither privacy nor static address is being used so use a
3628 * public address.
3629 */
3630 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3631
3632 return 0;
3633}
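
/* Summary of the address selection above, in order of precedence:
 *
 *	1. HCI_PRIVACY set  -> resolvable private address (a fresh RPA is
 *	                       generated only when the current one is stale)
 *	2. require_privacy  -> freshly generated unresolvable address
 *	3. forced static address or no public BD_ADDR -> static random address
 *	4. otherwise        -> public address
 */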
3634
Johan Hedberga1f4c312014-02-27 14:05:41 +02003635/* Copy the Identity Address of the controller.
3636 *
3637 * If the controller has a public BD_ADDR, then by default use that one.
3638	 * If this is an LE-only controller without a public address, default to
3639 * the static random address.
3640 *
3641 * For debugging purposes it is possible to force controllers with a
3642 * public address to use the static random address instead.
3643 */
3644void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3645 u8 *bdaddr_type)
3646{
3647 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3648 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3649 bacpy(bdaddr, &hdev->static_addr);
3650 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3651 } else {
3652 bacpy(bdaddr, &hdev->bdaddr);
3653 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3654 }
3655}
3656
David Herrmann9be0dab2012-04-22 14:39:57 +02003657/* Alloc HCI device */
3658struct hci_dev *hci_alloc_dev(void)
3659{
3660 struct hci_dev *hdev;
3661
3662 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3663 if (!hdev)
3664 return NULL;
3665
David Herrmannb1b813d2012-04-22 14:39:58 +02003666 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3667 hdev->esco_type = (ESCO_HV1);
3668 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003669 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3670 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003671 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3672 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003673
David Herrmannb1b813d2012-04-22 14:39:58 +02003674 hdev->sniff_max_interval = 800;
3675 hdev->sniff_min_interval = 80;
3676
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003677 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003678 hdev->le_scan_interval = 0x0060;
3679 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003680 hdev->le_conn_min_interval = 0x0028;
3681 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003682
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003683 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3684
David Herrmannb1b813d2012-04-22 14:39:58 +02003685 mutex_init(&hdev->lock);
3686 mutex_init(&hdev->req_lock);
3687
3688 INIT_LIST_HEAD(&hdev->mgmt_pending);
3689 INIT_LIST_HEAD(&hdev->blacklist);
3690 INIT_LIST_HEAD(&hdev->uuids);
3691 INIT_LIST_HEAD(&hdev->link_keys);
3692 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003693 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003694 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003695 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003696 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003697 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003698
3699 INIT_WORK(&hdev->rx_work, hci_rx_work);
3700 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3701 INIT_WORK(&hdev->tx_work, hci_tx_work);
3702 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003703
David Herrmannb1b813d2012-04-22 14:39:58 +02003704 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3705 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3706 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3707
David Herrmannb1b813d2012-04-22 14:39:58 +02003708 skb_queue_head_init(&hdev->rx_q);
3709 skb_queue_head_init(&hdev->cmd_q);
3710 skb_queue_head_init(&hdev->raw_q);
3711
3712 init_waitqueue_head(&hdev->req_wait_q);
3713
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003714 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003715
David Herrmannb1b813d2012-04-22 14:39:58 +02003716 hci_init_sysfs(hdev);
3717 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003718
3719 return hdev;
3720}
3721EXPORT_SYMBOL(hci_alloc_dev);
3722
3723/* Free HCI device */
3724void hci_free_dev(struct hci_dev *hdev)
3725{
David Herrmann9be0dab2012-04-22 14:39:57 +02003726 /* will free via device release */
3727 put_device(&hdev->dev);
3728}
3729EXPORT_SYMBOL(hci_free_dev);
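
/* Typical transport-driver use of the alloc/free pair (sketch only; the
 * my_* callbacks are hypothetical and error handling is elided):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */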
3730
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731/* Register HCI device */
3732int hci_register_dev(struct hci_dev *hdev)
3733{
David Herrmannb1b813d2012-04-22 14:39:58 +02003734 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735
David Herrmann010666a2012-01-07 15:47:07 +01003736 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 return -EINVAL;
3738
Mat Martineau08add512011-11-02 16:18:36 -07003739 /* Do not allow HCI_AMP devices to register at index 0,
3740 * so the index can be used as the AMP controller ID.
3741 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003742 switch (hdev->dev_type) {
3743 case HCI_BREDR:
3744 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3745 break;
3746 case HCI_AMP:
3747 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3748 break;
3749 default:
3750 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003752
Sasha Levin3df92b32012-05-27 22:36:56 +02003753 if (id < 0)
3754 return id;
3755
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756 sprintf(hdev->name, "hci%d", id);
3757 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003758
3759 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3760
Kees Cookd8537542013-07-03 15:04:57 -07003761 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3762 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003763 if (!hdev->workqueue) {
3764 error = -ENOMEM;
3765 goto err;
3766 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003767
Kees Cookd8537542013-07-03 15:04:57 -07003768 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3769 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003770 if (!hdev->req_workqueue) {
3771 destroy_workqueue(hdev->workqueue);
3772 error = -ENOMEM;
3773 goto err;
3774 }
3775
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003776 if (!IS_ERR_OR_NULL(bt_debugfs))
3777 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3778
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003779 dev_set_name(&hdev->dev, "%s", hdev->name);
3780
Johan Hedberg99780a72014-02-18 10:40:07 +02003781 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3782 CRYPTO_ALG_ASYNC);
3783 if (IS_ERR(hdev->tfm_aes)) {
3784 BT_ERR("Unable to create crypto context");
3785 error = PTR_ERR(hdev->tfm_aes);
3786 hdev->tfm_aes = NULL;
3787 goto err_wqueue;
3788 }
3789
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003790 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003791 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003792 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003794 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003795 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3796 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003797 if (hdev->rfkill) {
3798 if (rfkill_register(hdev->rfkill) < 0) {
3799 rfkill_destroy(hdev->rfkill);
3800 hdev->rfkill = NULL;
3801 }
3802 }
3803
Johan Hedberg5e130362013-09-13 08:58:17 +03003804 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3805 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3806
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003807 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003808 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003809
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003810 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003811 /* Assume BR/EDR support until proven otherwise (such as
3812	 * through reading supported features during init).
3813 */
3814 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3815 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003816
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003817 write_lock(&hci_dev_list_lock);
3818 list_add(&hdev->list, &hci_dev_list);
3819 write_unlock(&hci_dev_list_lock);
3820
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003822 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823
Johan Hedberg19202572013-01-14 22:33:51 +02003824 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003825
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003827
Johan Hedberg99780a72014-02-18 10:40:07 +02003828err_tfm:
3829 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003830err_wqueue:
3831 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003832 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003833err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003834 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003835
David Herrmann33ca9542011-10-08 14:58:49 +02003836 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837}
3838EXPORT_SYMBOL(hci_register_dev);
3839
3840/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003841void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842{
Sasha Levin3df92b32012-05-27 22:36:56 +02003843 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003844
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003845 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846
Johan Hovold94324962012-03-15 14:48:41 +01003847 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3848
Sasha Levin3df92b32012-05-27 22:36:56 +02003849 id = hdev->id;
3850
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003851 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003853 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854
3855 hci_dev_do_close(hdev);
3856
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303857 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003858 kfree_skb(hdev->reassembly[i]);
3859
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003860 cancel_work_sync(&hdev->power_on);
3861
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003862 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003863 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003864 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003865 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003866 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003867 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003868
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003869 /* mgmt_index_removed should take care of emptying the
3870 * pending list */
3871 BUG_ON(!list_empty(&hdev->mgmt_pending));
3872
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873 hci_notify(hdev, HCI_DEV_UNREG);
3874
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003875 if (hdev->rfkill) {
3876 rfkill_unregister(hdev->rfkill);
3877 rfkill_destroy(hdev->rfkill);
3878 }
3879
Johan Hedberg99780a72014-02-18 10:40:07 +02003880 if (hdev->tfm_aes)
3881 crypto_free_blkcipher(hdev->tfm_aes);
3882
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003883 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003884
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003885 debugfs_remove_recursive(hdev->debugfs);
3886
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003887 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003888 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003889
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003890 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003891 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003892 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003893 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003894 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003895 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003896 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003897 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003898 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003899 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003900
David Herrmanndc946bd2012-01-07 15:47:24 +01003901 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003902
3903 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904}
3905EXPORT_SYMBOL(hci_unregister_dev);
3906
3907/* Suspend HCI device */
3908int hci_suspend_dev(struct hci_dev *hdev)
3909{
3910 hci_notify(hdev, HCI_DEV_SUSPEND);
3911 return 0;
3912}
3913EXPORT_SYMBOL(hci_suspend_dev);
3914
3915/* Resume HCI device */
3916int hci_resume_dev(struct hci_dev *hdev)
3917{
3918 hci_notify(hdev, HCI_DEV_RESUME);
3919 return 0;
3920}
3921EXPORT_SYMBOL(hci_resume_dev);
3922
Marcel Holtmann76bca882009-11-18 00:40:39 +01003923/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003924int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003925{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003926 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003927 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003928 kfree_skb(skb);
3929 return -ENXIO;
3930 }
3931
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003932 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003933 bt_cb(skb)->incoming = 1;
3934
3935 /* Time stamp */
3936 __net_timestamp(skb);
3937
Marcel Holtmann76bca882009-11-18 00:40:39 +01003938 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003939 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003940
Marcel Holtmann76bca882009-11-18 00:40:39 +01003941 return 0;
3942}
3943EXPORT_SYMBOL(hci_recv_frame);
3944
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303945static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003946 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303947{
3948 int len = 0;
3949 int hlen = 0;
3950 int remain = count;
3951 struct sk_buff *skb;
3952 struct bt_skb_cb *scb;
3953
3954 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003955 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303956 return -EILSEQ;
3957
3958 skb = hdev->reassembly[index];
3959
3960 if (!skb) {
3961 switch (type) {
3962 case HCI_ACLDATA_PKT:
3963 len = HCI_MAX_FRAME_SIZE;
3964 hlen = HCI_ACL_HDR_SIZE;
3965 break;
3966 case HCI_EVENT_PKT:
3967 len = HCI_MAX_EVENT_SIZE;
3968 hlen = HCI_EVENT_HDR_SIZE;
3969 break;
3970 case HCI_SCODATA_PKT:
3971 len = HCI_MAX_SCO_SIZE;
3972 hlen = HCI_SCO_HDR_SIZE;
3973 break;
3974 }
3975
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003976 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303977 if (!skb)
3978 return -ENOMEM;
3979
3980 scb = (void *) skb->cb;
3981 scb->expect = hlen;
3982 scb->pkt_type = type;
3983
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303984 hdev->reassembly[index] = skb;
3985 }
3986
3987 while (count) {
3988 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003989 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303990
3991 memcpy(skb_put(skb, len), data, len);
3992
3993 count -= len;
3994 data += len;
3995 scb->expect -= len;
3996 remain = count;
3997
3998 switch (type) {
3999 case HCI_EVENT_PKT:
4000 if (skb->len == HCI_EVENT_HDR_SIZE) {
4001 struct hci_event_hdr *h = hci_event_hdr(skb);
4002 scb->expect = h->plen;
4003
4004 if (skb_tailroom(skb) < scb->expect) {
4005 kfree_skb(skb);
4006 hdev->reassembly[index] = NULL;
4007 return -ENOMEM;
4008 }
4009 }
4010 break;
4011
4012 case HCI_ACLDATA_PKT:
4013 if (skb->len == HCI_ACL_HDR_SIZE) {
4014 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4015 scb->expect = __le16_to_cpu(h->dlen);
4016
4017 if (skb_tailroom(skb) < scb->expect) {
4018 kfree_skb(skb);
4019 hdev->reassembly[index] = NULL;
4020 return -ENOMEM;
4021 }
4022 }
4023 break;
4024
4025 case HCI_SCODATA_PKT:
4026 if (skb->len == HCI_SCO_HDR_SIZE) {
4027 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4028 scb->expect = h->dlen;
4029
4030 if (skb_tailroom(skb) < scb->expect) {
4031 kfree_skb(skb);
4032 hdev->reassembly[index] = NULL;
4033 return -ENOMEM;
4034 }
4035 }
4036 break;
4037 }
4038
4039 if (scb->expect == 0) {
4040 /* Complete frame */
4041
4042 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004043 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304044
4045 hdev->reassembly[index] = NULL;
4046 return remain;
4047 }
4048 }
4049
4050 return remain;
4051}
4052
Marcel Holtmannef222012007-07-11 06:42:04 +02004053int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4054{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304055 int rem = 0;
4056
Marcel Holtmannef222012007-07-11 06:42:04 +02004057 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4058 return -EILSEQ;
4059
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004060 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004061 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304062 if (rem < 0)
4063 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004064
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304065 data += (count - rem);
4066 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004067 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004068
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304069 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004070}
4071EXPORT_SYMBOL(hci_recv_fragment);
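
/* Fragment delivery sketch (illustrative): a UART-style driver may feed
 * arbitrary chunks of one packet type at a time, e.g.
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * Each packet type gets its own reassembly slot (index type - 1), and
 * every completed frame is handed to hci_recv_frame() above.
 */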
4072
Suraj Sumangala99811512010-07-14 13:02:19 +05304073#define STREAM_REASSEMBLY 0
4074
4075int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4076{
4077 int type;
4078 int rem = 0;
4079
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004080 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304081 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4082
4083 if (!skb) {
4084 struct { char type; } *pkt;
4085
4086 /* Start of the frame */
4087 pkt = data;
4088 type = pkt->type;
4089
4090 data++;
4091 count--;
4092 } else
4093 type = bt_cb(skb)->pkt_type;
4094
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004095 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004096 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304097 if (rem < 0)
4098 return rem;
4099
4100 data += (count - rem);
4101 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004102 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304103
4104 return rem;
4105}
4106EXPORT_SYMBOL(hci_recv_stream_fragment);
4107
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108/* ---- Interface to upper protocols ---- */
4109
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110int hci_register_cb(struct hci_cb *cb)
4111{
4112 BT_DBG("%p name %s", cb, cb->name);
4113
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004114 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004116 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117
4118 return 0;
4119}
4120EXPORT_SYMBOL(hci_register_cb);
4121
4122int hci_unregister_cb(struct hci_cb *cb)
4123{
4124 BT_DBG("%p name %s", cb, cb->name);
4125
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004126 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004128 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129
4130 return 0;
4131}
4132EXPORT_SYMBOL(hci_unregister_cb);
4133
Marcel Holtmann51086992013-10-10 14:54:19 -07004134static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004136 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004138 /* Time stamp */
4139 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004141 /* Send copy to monitor */
4142 hci_send_to_monitor(hdev, skb);
4143
4144 if (atomic_read(&hdev->promisc)) {
4145 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004146 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147 }
4148
4149 /* Get rid of skb owner, prior to sending to the driver. */
4150 skb_orphan(skb);
4151
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004152 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004153 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154}
4155
Johan Hedberg3119ae92013-03-05 20:37:44 +02004156void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4157{
4158 skb_queue_head_init(&req->cmd_q);
4159 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004160 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004161}
4162
4163int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4164{
4165 struct hci_dev *hdev = req->hdev;
4166 struct sk_buff *skb;
4167 unsigned long flags;
4168
4169 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4170
Andre Guedes5d73e032013-03-08 11:20:16 -03004171	/* If an error occurred during request building, remove all HCI
4172 * commands queued on the HCI request queue.
4173 */
4174 if (req->err) {
4175 skb_queue_purge(&req->cmd_q);
4176 return req->err;
4177 }
4178
Johan Hedberg3119ae92013-03-05 20:37:44 +02004179 /* Do not allow empty requests */
4180 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004181 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004182
4183 skb = skb_peek_tail(&req->cmd_q);
4184 bt_cb(skb)->req.complete = complete;
4185
4186 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4187 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4188 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4189
4190 queue_work(hdev->workqueue, &hdev->cmd_work);
4191
4192 return 0;
4193}
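
/* Usage sketch (illustrative; my_complete is a hypothetical
 * hci_req_complete_t callback):
 *
 *	struct hci_cp_le_set_scan_enable cp;
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_ENABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */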
4194
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004195static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004196 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197{
4198 int len = HCI_COMMAND_HDR_SIZE + plen;
4199 struct hci_command_hdr *hdr;
4200 struct sk_buff *skb;
4201
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004203 if (!skb)
4204 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205
4206 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004207 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 hdr->plen = plen;
4209
4210 if (plen)
4211 memcpy(skb_put(skb, plen), param, plen);
4212
4213 BT_DBG("skb len %d", skb->len);
4214
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004215 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004216
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004217 return skb;
4218}
4219
4220/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004221int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4222 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004223{
4224 struct sk_buff *skb;
4225
4226 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4227
4228 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4229 if (!skb) {
4230 BT_ERR("%s no memory for command", hdev->name);
4231 return -ENOMEM;
4232 }
4233
Johan Hedberg11714b32013-03-05 20:37:47 +02004234	/* Stand-alone HCI commands must be flagged as
4235 * single-command requests.
4236 */
4237 bt_cb(skb)->req.start = true;
4238
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004240 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241
4242 return 0;
4243}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244
Johan Hedberg71c76a12013-03-05 20:37:46 +02004245/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004246void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4247 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004248{
4249 struct hci_dev *hdev = req->hdev;
4250 struct sk_buff *skb;
4251
4252 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4253
Andre Guedes34739c12013-03-08 11:20:18 -03004254	/* If an error occurred during request building, there is no point in
4255 * queueing the HCI command. We can simply return.
4256 */
4257 if (req->err)
4258 return;
4259
Johan Hedberg71c76a12013-03-05 20:37:46 +02004260 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4261 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004262 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4263 hdev->name, opcode);
4264 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004265 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004266 }
4267
4268 if (skb_queue_empty(&req->cmd_q))
4269 bt_cb(skb)->req.start = true;
4270
Johan Hedberg02350a72013-04-03 21:50:29 +03004271 bt_cb(skb)->req.event = event;
4272
Johan Hedberg71c76a12013-03-05 20:37:46 +02004273 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004274}
4275
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004276void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4277 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004278{
4279 hci_req_add_ev(req, opcode, plen, param, 0);
4280}
4281
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004283void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284{
4285 struct hci_command_hdr *hdr;
4286
4287 if (!hdev->sent_cmd)
4288 return NULL;
4289
4290 hdr = (void *) hdev->sent_cmd->data;
4291
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004292 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293 return NULL;
4294
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004295 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296
4297 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4298}
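/* Illustrative sketch (not part of the original file): event handlers
 * typically use hci_sent_cmd_data() to recover the parameters of the
 * command that a Command Complete event refers to, roughly like this:
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev, u8 status)
{
	u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;

	BT_DBG("%s requested scan mode 0x%2.2x status 0x%2.2x",
	       hdev->name, *sent, status);
}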
4299
4300/* Send ACL data */
4301static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4302{
4303 struct hci_acl_hdr *hdr;
4304 int len = skb->len;
4305
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004306 skb_push(skb, HCI_ACL_HDR_SIZE);
4307 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004308 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004309 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4310 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311}
4312
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004313static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004314 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004316 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 struct hci_dev *hdev = conn->hdev;
4318 struct sk_buff *list;
4319
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004320 skb->len = skb_headlen(skb);
4321 skb->data_len = 0;
4322
4323 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004324
4325 switch (hdev->dev_type) {
4326 case HCI_BREDR:
4327 hci_add_acl_hdr(skb, conn->handle, flags);
4328 break;
4329 case HCI_AMP:
4330 hci_add_acl_hdr(skb, chan->handle, flags);
4331 break;
4332 default:
4333 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4334 return;
4335 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004336
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004337 list = skb_shinfo(skb)->frag_list;
4338 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339		/* Non-fragmented */
4340 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4341
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004342 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343 } else {
4344 /* Fragmented */
4345 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4346
4347 skb_shinfo(skb)->frag_list = NULL;
4348
4349 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004350 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004352 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004353
4354 flags &= ~ACL_START;
4355 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 do {
4357 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004358
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004359 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004360 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361
4362 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4363
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004364 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365 } while (list);
4366
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004367 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004369}
4370
4371void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4372{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004373 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004374
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004375 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004376
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004377 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004379 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380}
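/* Illustrative sketch (not part of the original file): a caller such as
 * L2CAP hands a possibly fragmented skb to the scheduler roughly like this.
 * Only the head skb needs the ACL_START flag; hci_queue_acl() above
 * re-flags any frag_list members as ACL_CONT before queueing them
 * atomically.
 */
static void example_send_l2cap_pdu(struct hci_chan *chan, struct sk_buff *skb)
{
	hci_send_acl(chan, skb, ACL_START);
}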
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381
4382/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004383void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384{
4385 struct hci_dev *hdev = conn->hdev;
4386 struct hci_sco_hdr hdr;
4387
4388 BT_DBG("%s len %d", hdev->name, skb->len);
4389
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004390 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391 hdr.dlen = skb->len;
4392
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004393 skb_push(skb, HCI_SCO_HDR_SIZE);
4394 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004395 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004397 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004398
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004400 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402
4403/* ---- HCI TX task (outgoing data) ---- */
4404
4405/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004406static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4407 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408{
4409 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004410 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004411 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004413	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004415
4416 rcu_read_lock();
4417
4418 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004419 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004421
4422 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4423 continue;
4424
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 num++;
4426
4427 if (c->sent < min) {
4428 min = c->sent;
4429 conn = c;
4430 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004431
4432 if (hci_conn_num(hdev, type) == num)
4433 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434 }
4435
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004436 rcu_read_unlock();
4437
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004439 int cnt, q;
4440
4441 switch (conn->type) {
4442 case ACL_LINK:
4443 cnt = hdev->acl_cnt;
4444 break;
4445 case SCO_LINK:
4446 case ESCO_LINK:
4447 cnt = hdev->sco_cnt;
4448 break;
4449 case LE_LINK:
4450 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4451 break;
4452 default:
4453 cnt = 0;
4454 BT_ERR("Unknown link type");
4455 }
4456
4457 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 *quote = q ? q : 1;
4459 } else
4460 *quote = 0;
4461
4462 BT_DBG("conn %p quote %d", conn, *quote);
4463 return conn;
4464}
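/* Worked example (added for clarity): with hdev->acl_cnt == 8 free
 * controller slots and num == 3 ACL connections holding queued data, the
 * least-loaded connection is picked and granted a quote of 8 / 3 == 2
 * packets for this round; the "q ? q : 1" fallback guarantees a minimum
 * quote of 1 so the chosen connection can always make progress.
 */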
4465
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004466static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467{
4468 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004469 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470
Ville Tervobae1f5d92011-02-10 22:38:53 -03004471 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004473 rcu_read_lock();
4474
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004476 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004477 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004478 BT_ERR("%s killing stalled connection %pMR",
4479 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004480 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481 }
4482 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004483
4484 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485}
4486
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004487static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4488 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004489{
4490 struct hci_conn_hash *h = &hdev->conn_hash;
4491 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004492 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004493 struct hci_conn *conn;
4494 int cnt, q, conn_num = 0;
4495
4496 BT_DBG("%s", hdev->name);
4497
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004498 rcu_read_lock();
4499
4500 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004501 struct hci_chan *tmp;
4502
4503 if (conn->type != type)
4504 continue;
4505
4506 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4507 continue;
4508
4509 conn_num++;
4510
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004511 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004512 struct sk_buff *skb;
4513
4514 if (skb_queue_empty(&tmp->data_q))
4515 continue;
4516
4517 skb = skb_peek(&tmp->data_q);
4518 if (skb->priority < cur_prio)
4519 continue;
4520
4521 if (skb->priority > cur_prio) {
4522 num = 0;
4523 min = ~0;
4524 cur_prio = skb->priority;
4525 }
4526
4527 num++;
4528
4529 if (conn->sent < min) {
4530 min = conn->sent;
4531 chan = tmp;
4532 }
4533 }
4534
4535 if (hci_conn_num(hdev, type) == conn_num)
4536 break;
4537 }
4538
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004539 rcu_read_unlock();
4540
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004541 if (!chan)
4542 return NULL;
4543
4544 switch (chan->conn->type) {
4545 case ACL_LINK:
4546 cnt = hdev->acl_cnt;
4547 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004548 case AMP_LINK:
4549 cnt = hdev->block_cnt;
4550 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004551 case SCO_LINK:
4552 case ESCO_LINK:
4553 cnt = hdev->sco_cnt;
4554 break;
4555 case LE_LINK:
4556 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4557 break;
4558 default:
4559 cnt = 0;
4560 BT_ERR("Unknown link type");
4561 }
4562
4563 q = cnt / num;
4564 *quote = q ? q : 1;
4565 BT_DBG("chan %p quote %d", chan, *quote);
4566 return chan;
4567}
4568
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004569static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4570{
4571 struct hci_conn_hash *h = &hdev->conn_hash;
4572 struct hci_conn *conn;
4573 int num = 0;
4574
4575 BT_DBG("%s", hdev->name);
4576
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004577 rcu_read_lock();
4578
4579 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004580 struct hci_chan *chan;
4581
4582 if (conn->type != type)
4583 continue;
4584
4585 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4586 continue;
4587
4588 num++;
4589
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004590 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004591 struct sk_buff *skb;
4592
4593 if (chan->sent) {
4594 chan->sent = 0;
4595 continue;
4596 }
4597
4598 if (skb_queue_empty(&chan->data_q))
4599 continue;
4600
4601 skb = skb_peek(&chan->data_q);
4602 if (skb->priority >= HCI_PRIO_MAX - 1)
4603 continue;
4604
4605 skb->priority = HCI_PRIO_MAX - 1;
4606
4607 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004608 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004609 }
4610
4611 if (hci_conn_num(hdev, type) == num)
4612 break;
4613 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004614
4615 rcu_read_unlock();
4616
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004617}
4618
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004619static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4620{
4621 /* Calculate count of blocks used by this packet */
4622 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4623}
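/* Worked example (added for clarity): with hdev->block_len == 64 data
 * bytes per controller buffer block, an ACL frame of 260 bytes
 * (HCI_ACL_HDR_SIZE == 4 bytes of header plus 256 bytes of payload)
 * occupies DIV_ROUND_UP(260 - 4, 64) == 4 blocks.
 */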
4624
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004625static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 if (!test_bit(HCI_RAW, &hdev->flags)) {
4628 /* ACL tx timeout must be longer than maximum
4629 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004630 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004631 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004632 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004634}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004636static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004637{
4638 unsigned int cnt = hdev->acl_cnt;
4639 struct hci_chan *chan;
4640 struct sk_buff *skb;
4641 int quote;
4642
4643 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004644
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004645 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004646 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004647 u32 priority = (skb_peek(&chan->data_q))->priority;
4648 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004649 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004650 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004651
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004652 /* Stop if priority has changed */
4653 if (skb->priority < priority)
4654 break;
4655
4656 skb = skb_dequeue(&chan->data_q);
4657
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004658 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004659 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004660
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004661 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662 hdev->acl_last_tx = jiffies;
4663
4664 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004665 chan->sent++;
4666 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667 }
4668 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004669
4670 if (cnt != hdev->acl_cnt)
4671 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672}
4673
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004674static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004675{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004676 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004677 struct hci_chan *chan;
4678 struct sk_buff *skb;
4679 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004680 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004681
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004682 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004683
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004684 BT_DBG("%s", hdev->name);
4685
4686 if (hdev->dev_type == HCI_AMP)
4687 type = AMP_LINK;
4688 else
4689 type = ACL_LINK;
4690
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004691 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004692 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004693 u32 priority = (skb_peek(&chan->data_q))->priority;
4694 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4695 int blocks;
4696
4697 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004698 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004699
4700 /* Stop if priority has changed */
4701 if (skb->priority < priority)
4702 break;
4703
4704 skb = skb_dequeue(&chan->data_q);
4705
4706 blocks = __get_blocks(hdev, skb);
4707 if (blocks > hdev->block_cnt)
4708 return;
4709
4710 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004711 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004712
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004713 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004714 hdev->acl_last_tx = jiffies;
4715
4716 hdev->block_cnt -= blocks;
4717 quote -= blocks;
4718
4719 chan->sent += blocks;
4720 chan->conn->sent += blocks;
4721 }
4722 }
4723
4724 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004725 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004726}
4727
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004728static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004729{
4730 BT_DBG("%s", hdev->name);
4731
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004732 /* No ACL link over BR/EDR controller */
4733 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4734 return;
4735
4736 /* No AMP link over AMP controller */
4737 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004738 return;
4739
4740 switch (hdev->flow_ctl_mode) {
4741 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4742 hci_sched_acl_pkt(hdev);
4743 break;
4744
4745 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4746 hci_sched_acl_blk(hdev);
4747 break;
4748 }
4749}
4750
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004752static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753{
4754 struct hci_conn *conn;
4755 struct sk_buff *skb;
4756 int quote;
4757
4758 BT_DBG("%s", hdev->name);
4759
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004760 if (!hci_conn_num(hdev, SCO_LINK))
4761 return;
4762
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4764 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4765 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004766 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767
4768 conn->sent++;
4769 if (conn->sent == ~0)
4770 conn->sent = 0;
4771 }
4772 }
4773}
4774
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004775static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004776{
4777 struct hci_conn *conn;
4778 struct sk_buff *skb;
4779 int quote;
4780
4781 BT_DBG("%s", hdev->name);
4782
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004783 if (!hci_conn_num(hdev, ESCO_LINK))
4784 return;
4785
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004786 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4787 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004788 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4789 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004790 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004791
4792 conn->sent++;
4793 if (conn->sent == ~0)
4794 conn->sent = 0;
4795 }
4796 }
4797}
4798
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004799static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004800{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004801 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004802 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004803 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004804
4805 BT_DBG("%s", hdev->name);
4806
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004807 if (!hci_conn_num(hdev, LE_LINK))
4808 return;
4809
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004810 if (!test_bit(HCI_RAW, &hdev->flags)) {
4811 /* LE tx timeout must be longer than maximum
4812 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004813 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004814 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004815 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004816 }
4817
4818 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004819 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004820 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004821 u32 priority = (skb_peek(&chan->data_q))->priority;
4822 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004823 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004824 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004825
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004826 /* Stop if priority has changed */
4827 if (skb->priority < priority)
4828 break;
4829
4830 skb = skb_dequeue(&chan->data_q);
4831
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004832 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004833 hdev->le_last_tx = jiffies;
4834
4835 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004836 chan->sent++;
4837 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004838 }
4839 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004840
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004841 if (hdev->le_pkts)
4842 hdev->le_cnt = cnt;
4843 else
4844 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004845
4846 if (cnt != tmp)
4847 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004848}
4849
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004850static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004852 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853 struct sk_buff *skb;
4854
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004855 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004856 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857
Marcel Holtmann52de5992013-09-03 18:08:38 -07004858 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4859 /* Schedule queues and send stuff to HCI driver */
4860 hci_sched_acl(hdev);
4861 hci_sched_sco(hdev);
4862 hci_sched_esco(hdev);
4863 hci_sched_le(hdev);
4864 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004865
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866 /* Send next queued raw (unknown type) packet */
4867 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004868 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869}
4870
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004871/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004872
4873/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004874static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875{
4876 struct hci_acl_hdr *hdr = (void *) skb->data;
4877 struct hci_conn *conn;
4878 __u16 handle, flags;
4879
4880 skb_pull(skb, HCI_ACL_HDR_SIZE);
4881
4882 handle = __le16_to_cpu(hdr->handle);
4883 flags = hci_flags(handle);
4884 handle = hci_handle(handle);
4885
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004886 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004887 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888
4889 hdev->stat.acl_rx++;
4890
4891 hci_dev_lock(hdev);
4892 conn = hci_conn_hash_lookup_handle(hdev, handle);
4893 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004894
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004896 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004897
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004899 l2cap_recv_acldata(conn, skb, flags);
4900 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004902 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004903 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904 }
4905
4906 kfree_skb(skb);
4907}
4908
4909/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004910static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911{
4912 struct hci_sco_hdr *hdr = (void *) skb->data;
4913 struct hci_conn *conn;
4914 __u16 handle;
4915
4916 skb_pull(skb, HCI_SCO_HDR_SIZE);
4917
4918 handle = __le16_to_cpu(hdr->handle);
4919
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004920 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921
4922 hdev->stat.sco_rx++;
4923
4924 hci_dev_lock(hdev);
4925 conn = hci_conn_hash_lookup_handle(hdev, handle);
4926 hci_dev_unlock(hdev);
4927
4928 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004930 sco_recv_scodata(conn, skb);
4931 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004932 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004933 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004934 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935 }
4936
4937 kfree_skb(skb);
4938}
4939
Johan Hedberg9238f362013-03-05 20:37:48 +02004940static bool hci_req_is_complete(struct hci_dev *hdev)
4941{
4942 struct sk_buff *skb;
4943
4944 skb = skb_peek(&hdev->cmd_q);
4945 if (!skb)
4946 return true;
4947
4948 return bt_cb(skb)->req.start;
4949}
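/* Example (added for clarity): a request built from three hci_req_add()
 * calls yields cmd_q entries with req.start == { true, false, false };
 * the request is only considered complete once the next start-flagged
 * command, or an empty queue, is reached.
 */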
4950
Johan Hedberg42c6b122013-03-05 20:37:49 +02004951static void hci_resend_last(struct hci_dev *hdev)
4952{
4953 struct hci_command_hdr *sent;
4954 struct sk_buff *skb;
4955 u16 opcode;
4956
4957 if (!hdev->sent_cmd)
4958 return;
4959
4960 sent = (void *) hdev->sent_cmd->data;
4961 opcode = __le16_to_cpu(sent->opcode);
4962 if (opcode == HCI_OP_RESET)
4963 return;
4964
4965 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4966 if (!skb)
4967 return;
4968
4969 skb_queue_head(&hdev->cmd_q, skb);
4970 queue_work(hdev->workqueue, &hdev->cmd_work);
4971}
4972
Johan Hedberg9238f362013-03-05 20:37:48 +02004973void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4974{
4975 hci_req_complete_t req_complete = NULL;
4976 struct sk_buff *skb;
4977 unsigned long flags;
4978
4979 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4980
Johan Hedberg42c6b122013-03-05 20:37:49 +02004981 /* If the completed command doesn't match the last one that was
4982	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004983 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004984 if (!hci_sent_cmd_data(hdev, opcode)) {
4985 /* Some CSR based controllers generate a spontaneous
4986 * reset complete event during init and any pending
4987 * command will never be completed. In such a case we
4988 * need to resend whatever was the last sent
4989 * command.
4990 */
4991 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4992 hci_resend_last(hdev);
4993
Johan Hedberg9238f362013-03-05 20:37:48 +02004994 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004995 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004996
4997 /* If the command succeeded and there's still more commands in
4998 * this request the request is not yet complete.
4999 */
5000 if (!status && !hci_req_is_complete(hdev))
5001 return;
5002
5003 /* If this was the last command in a request the complete
5004 * callback would be found in hdev->sent_cmd instead of the
5005 * command queue (hdev->cmd_q).
5006 */
5007 if (hdev->sent_cmd) {
5008 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005009
5010 if (req_complete) {
5011 /* We must set the complete callback to NULL to
5012 * avoid calling the callback more than once if
5013 * this function gets called again.
5014 */
5015 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5016
Johan Hedberg9238f362013-03-05 20:37:48 +02005017 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005018 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005019 }
5020
5021 /* Remove all pending commands belonging to this request */
5022 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5023 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5024 if (bt_cb(skb)->req.start) {
5025 __skb_queue_head(&hdev->cmd_q, skb);
5026 break;
5027 }
5028
5029 req_complete = bt_cb(skb)->req.complete;
5030 kfree_skb(skb);
5031 }
5032 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5033
5034call_complete:
5035 if (req_complete)
5036 req_complete(hdev, status);
5037}
5038
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005039static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005041 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042 struct sk_buff *skb;
5043
5044 BT_DBG("%s", hdev->name);
5045
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005047 /* Send copy to monitor */
5048 hci_send_to_monitor(hdev, skb);
5049
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050 if (atomic_read(&hdev->promisc)) {
5051 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005052 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053 }
5054
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005055 if (test_bit(HCI_RAW, &hdev->flags) ||
5056 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 kfree_skb(skb);
5058 continue;
5059 }
5060
5061 if (test_bit(HCI_INIT, &hdev->flags)) {
5062			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005063 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064 case HCI_ACLDATA_PKT:
5065 case HCI_SCODATA_PKT:
5066 kfree_skb(skb);
5067 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005068 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 }
5070
5071 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005072 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005074 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 hci_event_packet(hdev, skb);
5076 break;
5077
5078 case HCI_ACLDATA_PKT:
5079 BT_DBG("%s ACL data packet", hdev->name);
5080 hci_acldata_packet(hdev, skb);
5081 break;
5082
5083 case HCI_SCODATA_PKT:
5084 BT_DBG("%s SCO data packet", hdev->name);
5085 hci_scodata_packet(hdev, skb);
5086 break;
5087
5088 default:
5089 kfree_skb(skb);
5090 break;
5091 }
5092 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093}
5094
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005095static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005096{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005097 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098 struct sk_buff *skb;
5099
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005100 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5101 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005104 if (atomic_read(&hdev->cmd_cnt)) {
5105 skb = skb_dequeue(&hdev->cmd_q);
5106 if (!skb)
5107 return;
5108
Wei Yongjun7585b972009-02-25 18:29:52 +08005109 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005111 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005112 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005114 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005115 if (test_bit(HCI_RESET, &hdev->flags))
5116 del_timer(&hdev->cmd_timer);
5117 else
5118 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005119 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120 } else {
5121 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005122 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 }
5124 }
5125}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005126
5127void hci_req_add_le_scan_disable(struct hci_request *req)
5128{
5129 struct hci_cp_le_set_scan_enable cp;
5130
5131 memset(&cp, 0, sizeof(cp));
5132 cp.enable = LE_SCAN_DISABLE;
5133 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5134}
Andre Guedesa4790db2014-02-26 20:21:47 -03005135
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005136void hci_req_add_le_passive_scan(struct hci_request *req)
5137{
5138 struct hci_cp_le_set_scan_param param_cp;
5139 struct hci_cp_le_set_scan_enable enable_cp;
5140 struct hci_dev *hdev = req->hdev;
5141 u8 own_addr_type;
5142
5143 /* Set require_privacy to true to avoid identification from
5144 * unknown peer devices. Since this is passive scanning, no
5145 * SCAN_REQ using the local identity should be sent. Mandating
5146 * privacy is just an extra precaution.
5147 */
5148 if (hci_update_random_address(req, true, &own_addr_type))
5149 return;
5150
5151 memset(&param_cp, 0, sizeof(param_cp));
5152 param_cp.type = LE_SCAN_PASSIVE;
5153 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5154 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5155 param_cp.own_address_type = own_addr_type;
5156 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5157 &param_cp);
5158
5159 memset(&enable_cp, 0, sizeof(enable_cp));
5160 enable_cp.enable = LE_SCAN_ENABLE;
5161 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5162 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5163 &enable_cp);
5164}
5165
Andre Guedesa4790db2014-02-26 20:21:47 -03005166static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5167{
5168 if (status)
5169 BT_DBG("HCI request failed to update background scanning: "
5170 "status 0x%2.2x", status);
5171}
5172
5173/* This function controls the background scanning based on hdev->pend_le_conns
5174 * list. If there are pending LE connections, we start the background scanning,
5175 * otherwise we stop it.
5176 *
5177 * This function requires the caller holds hdev->lock.
5178 */
5179void hci_update_background_scan(struct hci_dev *hdev)
5180{
Andre Guedesa4790db2014-02-26 20:21:47 -03005181 struct hci_request req;
5182 struct hci_conn *conn;
5183 int err;
5184
5185 hci_req_init(&req, hdev);
5186
5187 if (list_empty(&hdev->pend_le_conns)) {
5188 /* If there is no pending LE connections, we should stop
5189 * the background scanning.
5190 */
5191
5192 /* If controller is not scanning we are done. */
5193 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5194 return;
5195
5196 hci_req_add_le_scan_disable(&req);
5197
5198 BT_DBG("%s stopping background scanning", hdev->name);
5199 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005200 /* If there is at least one pending LE connection, we should
5201 * keep the background scan running.
5202 */
5203
5204 /* If controller is already scanning we are done. */
5205 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5206 return;
5207
5208 /* If controller is connecting, we should not start scanning
5209 * since some controllers are not able to scan and connect at
5210 * the same time.
5211 */
5212 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5213 if (conn)
5214 return;
5215
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005216 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005217
5218 BT_DBG("%s starting background scanning", hdev->name);
5219 }
5220
5221 err = hci_req_run(&req, update_background_scan_complete);
5222 if (err)
5223 BT_ERR("Failed to run HCI request: err %d", err);
5224}
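/* Illustrative call site (sketch, not part of the original file): as the
 * comment above notes, callers must hold hdev->lock, so a typical usage
 * pattern is:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */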